VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 99681

Last change on this file since 99681 was 99665, checked in by vboxsync, 20 months ago

VMM: Nested VMX: bugref:10318 Comment.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 526.3 KB
Line 
1/* $Id: VMXAllTemplate.cpp.h 99665 2023-05-08 10:49:02Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_64)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_64 is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_64)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_64 is missing"
39#endif
40
/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
#define HMVMX_WITH_CONDENSED_VMREADS

/** Use the function table (g_aVMExitHandlers) for VM-exit dispatch. */
#define HMVMX_USE_FUNCTION_TABLE

/** @name Tagged-TLB flush handler selection values.
 * Which handler applies depends on the EPT/VPID combination named by each
 * constant (see the flush handler selection code elsewhere in this template).
 * @{ */
#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID         0   /**< EPT+VPID variant. */
#define HMVMX_FLUSH_TAGGED_TLB_EPT              1   /**< EPT-only variant. */
#define HMVMX_FLUSH_TAGGED_TLB_VPID             2   /**< VPID-only variant. */
#define HMVMX_FLUSH_TAGGED_TLB_NONE             3   /**< Neither EPT nor VPID variant. */
/** @} */
52
/** Assert that all the given fields have been read from the VMCS.
 *
 * Expands to a no-op in non-strict builds (arguments are then not evaluated).
 *
 * Bug fix: the strict variant previously dereferenced the caller-scope name
 * @c pVmxTransient instead of the macro parameter, silently requiring every
 * caller to have a local with exactly that name.  It now uses the parameter. */
#ifdef VBOX_STRICT
# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
    do { \
        uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
        Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
    } while (0)
#else
# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
#endif
63
/**
 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
 * guest using hardware-assisted VMX.
 *
 * This excludes state like GPRs (other than RSP) which are always swapped
 * and restored across the world-switch and also registers like EFER,
 * MSR which cannot be modified by the guest without causing a VM-exit.
 */
#define HMVMX_CPUMCTX_EXTRN_ALL      (  CPUMCTX_EXTRN_RIP             \
                                      | CPUMCTX_EXTRN_RFLAGS          \
                                      | CPUMCTX_EXTRN_RSP             \
                                      | CPUMCTX_EXTRN_SREG_MASK       \
                                      | CPUMCTX_EXTRN_TABLE_MASK      \
                                      | CPUMCTX_EXTRN_KERNEL_GS_BASE  \
                                      | CPUMCTX_EXTRN_SYSCALL_MSRS    \
                                      | CPUMCTX_EXTRN_SYSENTER_MSRS   \
                                      | CPUMCTX_EXTRN_TSC_AUX         \
                                      | CPUMCTX_EXTRN_OTHER_MSRS      \
                                      | CPUMCTX_EXTRN_CR0             \
                                      | CPUMCTX_EXTRN_CR3             \
                                      | CPUMCTX_EXTRN_CR4             \
                                      | CPUMCTX_EXTRN_DR7             \
                                      | CPUMCTX_EXTRN_HWVIRT          \
                                      | CPUMCTX_EXTRN_INHIBIT_INT     \
                                      | CPUMCTX_EXTRN_INHIBIT_NMI)

/**
 * Exception bitmap mask for real-mode guests (real-on-v86).
 *
 * We need to intercept all exceptions manually except:
 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
 *   due to bugs in Intel CPUs.
 * - \#PF need not be intercepted even in real-mode if we have nested paging
 *   support.
 */
#define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)  /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI)   \
                                      | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
                                      | RT_BIT(X86_XCPT_UD)             | RT_BIT(X86_XCPT_NM)    | RT_BIT(X86_XCPT_DF)    \
                                      | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                      | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)  /* RT_BIT(X86_XCPT_PF) */  \
                                      | RT_BIT(X86_XCPT_MF)  /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC)    \
                                      | RT_BIT(X86_XCPT_XF))

/** Maximum VM-instruction error number (see g_apszVmxInstrErrors). */
#define HMVMX_INSTR_ERROR_MAX        28

/** Profiling macro.
 * Brackets VM-exit dispatch in STAM advanced-profile samples; compiled out
 * unless HM_PROFILE_EXIT_DISPATCH is defined. */
#ifdef HM_PROFILE_EXIT_DISPATCH
# define HMVMX_START_EXIT_DISPATCH_PROF()           STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()            STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
#else
# define HMVMX_START_EXIT_DISPATCH_PROF()           do { } while (0)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()            do { } while (0)
#endif

#ifndef IN_NEM_DARWIN
/** Assert that preemption is disabled or covered by thread-context hooks. */
# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)         Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
                                                           || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))

/** Assert that we haven't migrated CPUs when thread-context hooks are not
 *  used. */
# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)             AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                              || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
                                                              ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                              (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
#else
/* The NEM darwin backend runs in ring-3 where these preemption/CPU-migration
   invariants do not apply, so both asserts compile to no-ops. */
# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)         do { } while (0)
# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)             do { } while (0)
#endif

/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
 *  context (i.e. that none of the a_fExtrnMbz bits are still marked external). */
#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)  AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
                                                              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
                                                              (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
140
/** Log the VM-exit reason with an easily visible marker to identify it in a
 * potential sea of logging data.
 *
 * Bug fix: removed the stray trailing line-continuation backslash after the
 * closing "while (0)" -- it extended the macro onto the following (blank)
 * line and would silently swallow the next line were it ever non-blank. */
#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
    do { \
        Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
              HMGetVmxExitName(a_uExitReason))); \
    } while (0)
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
/**
 * Memory operand read or write access.
 *
 * Used to distinguish the direction of guest memory operand accesses when
 * checking/performing them.
 */
typedef enum VMXMEMACCESS
{
    VMXMEMACCESS_READ  = 0,     /**< The operand is read from guest memory. */
    VMXMEMACCESS_WRITE = 1      /**< The operand is written to guest memory. */
} VMXMEMACCESS;
161
162
/**
 * VMX VM-exit handler.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
/* Plain function type: handlers are called directly (switch dispatch). */
typedef VBOXSTRICTRC               FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#else
/* Callback type: handlers are invoked through g_aVMExitHandlers, so they need
   the proper calling-convention decoration. */
typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
/** Pointer to VM-exit handler. */
typedef FNVMXEXITHANDLER          *PFNVMXEXITHANDLER;
#endif

/**
 * VMX VM-exit handler, non-strict status code.
 *
 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
 *
 * @returns VBox status code, no informational status code returned.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
 *          use of that status code will be replaced with VINF_EM_SOMETHING
 *          later when switching over to IEM.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
typedef int                        FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#else
/* With the function table both handler flavours must share one signature. */
typedef FNVMXEXITHANDLER           FNVMXEXITHANDLERNSRC;
#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
#ifndef HMVMX_USE_FUNCTION_TABLE
/* Switch-based dispatch entry point (only without the function table). */
DECLINLINE(VBOXSTRICTRC)           vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
/* Handlers can be inlined into the dispatcher in this configuration. */
# define HMVMX_EXIT_DECL           DECLINLINE(VBOXSTRICTRC)
# define HMVMX_EXIT_NSRC_DECL      DECLINLINE(int)
#else
/* Table-based dispatch: handlers must be real callbacks with linkage. */
# define HMVMX_EXIT_DECL           static DECLCALLBACK(VBOXSTRICTRC)
# define HMVMX_EXIT_NSRC_DECL      HMVMX_EXIT_DECL
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/* Nested-guest dispatch entry point. */
DECLINLINE(VBOXSTRICTRC)           vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
#endif

/* Imports the requested CPUMCTX_EXTRN_XXX guest state from the VMCS. */
static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
/** @name VM-exit handler prototypes.
 * One handler per VMX_EXIT_XXX reason handled by this template; see the
 * g_aVMExitHandlers dispatch table for the reason-to-handler mapping.
 * @{
 */
static FNVMXEXITHANDLER            vmxHCExitXcptOrNmi;
static FNVMXEXITHANDLER            vmxHCExitExtInt;
static FNVMXEXITHANDLER            vmxHCExitTripleFault;
static FNVMXEXITHANDLERNSRC        vmxHCExitIntWindow;
static FNVMXEXITHANDLERNSRC        vmxHCExitNmiWindow;
static FNVMXEXITHANDLER            vmxHCExitTaskSwitch;
static FNVMXEXITHANDLER            vmxHCExitCpuid;
static FNVMXEXITHANDLER            vmxHCExitGetsec;
static FNVMXEXITHANDLER            vmxHCExitHlt;
static FNVMXEXITHANDLERNSRC        vmxHCExitInvd;
static FNVMXEXITHANDLER            vmxHCExitInvlpg;
static FNVMXEXITHANDLER            vmxHCExitRdpmc;
static FNVMXEXITHANDLER            vmxHCExitVmcall;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/* VMX instructions only exist as real handlers with nested hw-virt; without
   it they raise \#UD via vmxHCExitSetPendingXcptUD (see the dispatch table). */
static FNVMXEXITHANDLER            vmxHCExitVmclear;
static FNVMXEXITHANDLER            vmxHCExitVmlaunch;
static FNVMXEXITHANDLER            vmxHCExitVmptrld;
static FNVMXEXITHANDLER            vmxHCExitVmptrst;
static FNVMXEXITHANDLER            vmxHCExitVmread;
static FNVMXEXITHANDLER            vmxHCExitVmresume;
static FNVMXEXITHANDLER            vmxHCExitVmwrite;
static FNVMXEXITHANDLER            vmxHCExitVmxoff;
static FNVMXEXITHANDLER            vmxHCExitVmxon;
static FNVMXEXITHANDLER            vmxHCExitInvvpid;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLER            vmxHCExitInvept;
# endif
#endif
static FNVMXEXITHANDLER            vmxHCExitRdtsc;
static FNVMXEXITHANDLER            vmxHCExitMovCRx;
static FNVMXEXITHANDLER            vmxHCExitMovDRx;
static FNVMXEXITHANDLER            vmxHCExitIoInstr;
static FNVMXEXITHANDLER            vmxHCExitRdmsr;
static FNVMXEXITHANDLER            vmxHCExitWrmsr;
static FNVMXEXITHANDLER            vmxHCExitMwait;
static FNVMXEXITHANDLER            vmxHCExitMtf;
static FNVMXEXITHANDLER            vmxHCExitMonitor;
static FNVMXEXITHANDLER            vmxHCExitPause;
static FNVMXEXITHANDLERNSRC        vmxHCExitTprBelowThreshold;
static FNVMXEXITHANDLER            vmxHCExitApicAccess;
static FNVMXEXITHANDLER            vmxHCExitEptViolation;
static FNVMXEXITHANDLER            vmxHCExitEptMisconfig;
static FNVMXEXITHANDLER            vmxHCExitRdtscp;
static FNVMXEXITHANDLER            vmxHCExitPreemptTimer;
static FNVMXEXITHANDLERNSRC        vmxHCExitWbinvd;
static FNVMXEXITHANDLER            vmxHCExitXsetbv;
static FNVMXEXITHANDLER            vmxHCExitInvpcid;
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLERNSRC        vmxHCExitSetPendingXcptUD;
#endif
static FNVMXEXITHANDLERNSRC        vmxHCExitErrInvalidGuestState;
static FNVMXEXITHANDLERNSRC        vmxHCExitErrUnexpected;
/** @} */

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** @name Nested-guest VM-exit handler prototypes.
 * Variants invoked while executing a nested guest; these decide whether the
 * exit is reflected to the guest hypervisor or handled here.
 * @{
 */
static FNVMXEXITHANDLER            vmxHCExitXcptOrNmiNested;
static FNVMXEXITHANDLER            vmxHCExitTripleFaultNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitIntWindowNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitNmiWindowNested;
static FNVMXEXITHANDLER            vmxHCExitTaskSwitchNested;
static FNVMXEXITHANDLER            vmxHCExitHltNested;
static FNVMXEXITHANDLER            vmxHCExitInvlpgNested;
static FNVMXEXITHANDLER            vmxHCExitRdpmcNested;
static FNVMXEXITHANDLER            vmxHCExitVmreadVmwriteNested;
static FNVMXEXITHANDLER            vmxHCExitRdtscNested;
static FNVMXEXITHANDLER            vmxHCExitMovCRxNested;
static FNVMXEXITHANDLER            vmxHCExitMovDRxNested;
static FNVMXEXITHANDLER            vmxHCExitIoInstrNested;
static FNVMXEXITHANDLER            vmxHCExitRdmsrNested;
static FNVMXEXITHANDLER            vmxHCExitWrmsrNested;
static FNVMXEXITHANDLER            vmxHCExitMwaitNested;
static FNVMXEXITHANDLER            vmxHCExitMtfNested;
static FNVMXEXITHANDLER            vmxHCExitMonitorNested;
static FNVMXEXITHANDLER            vmxHCExitPauseNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitTprBelowThresholdNested;
static FNVMXEXITHANDLER            vmxHCExitApicAccessNested;
static FNVMXEXITHANDLER            vmxHCExitApicWriteNested;
static FNVMXEXITHANDLER            vmxHCExitVirtEoiNested;
static FNVMXEXITHANDLER            vmxHCExitRdtscpNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitWbinvdNested;
static FNVMXEXITHANDLER            vmxHCExitInvpcidNested;
static FNVMXEXITHANDLERNSRC        vmxHCExitErrInvalidGuestStateNested;
static FNVMXEXITHANDLER            vmxHCExitInstrNested;
static FNVMXEXITHANDLER            vmxHCExitInstrWithInfoNested;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static FNVMXEXITHANDLER            vmxHCExitEptViolationNested;
static FNVMXEXITHANDLER            vmxHCExitEptMisconfigNested;
# endif
/** @} */
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Array of all VMCS fields.
 * Any fields added to the VT-x spec. should be added here.
 *
 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
 * of nested-guests.
 *
 * Entries are grouped by field width and by control/read-only/guest/host class,
 * mirroring the VMCS field-encoding layout of the Intel SDM.
 */
static const uint32_t g_aVmcsFields[] =
{
    /* 16-bit control fields. */
    VMX_VMCS16_VPID,
    VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
    VMX_VMCS16_EPTP_INDEX,
    VMX_VMCS16_HLAT_PREFIX_SIZE,

    /* 16-bit guest-state fields. */
    VMX_VMCS16_GUEST_ES_SEL,
    VMX_VMCS16_GUEST_CS_SEL,
    VMX_VMCS16_GUEST_SS_SEL,
    VMX_VMCS16_GUEST_DS_SEL,
    VMX_VMCS16_GUEST_FS_SEL,
    VMX_VMCS16_GUEST_GS_SEL,
    VMX_VMCS16_GUEST_LDTR_SEL,
    VMX_VMCS16_GUEST_TR_SEL,
    VMX_VMCS16_GUEST_INTR_STATUS,
    VMX_VMCS16_GUEST_PML_INDEX,

    /* 16-bit host-state fields. */
    VMX_VMCS16_HOST_ES_SEL,
    VMX_VMCS16_HOST_CS_SEL,
    VMX_VMCS16_HOST_SS_SEL,
    VMX_VMCS16_HOST_DS_SEL,
    VMX_VMCS16_HOST_FS_SEL,
    VMX_VMCS16_HOST_GS_SEL,
    VMX_VMCS16_HOST_TR_SEL,

    /* 64-bit control fields (each with a _FULL and a _HIGH half encoding). */
    VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
    VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
    VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
    VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
    VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
    VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
    VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
    VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
    VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
    VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
    VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
    VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
    VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
    VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
    VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
    VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
    VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
    VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
    VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
    VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
    VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
    VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
    VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
    VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
    VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
    VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
    VMX_VMCS64_CTRL_EPTP_FULL,
    VMX_VMCS64_CTRL_EPTP_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
    VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
    VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
    VMX_VMCS64_CTRL_EPTP_LIST_FULL,
    VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
    VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
    VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
    VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
    VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
    VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
    VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
    VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_SPPTP_FULL,
    VMX_VMCS64_CTRL_SPPTP_HIGH,
    VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
    VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
    VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
    VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
    VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
    VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
    VMX_VMCS64_CTRL_HLAT_PTR_FULL,
    VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
    VMX_VMCS64_CTRL_EXIT2_FULL,
    VMX_VMCS64_CTRL_EXIT2_HIGH,

    /* 64-bit read-only data fields. */
    VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
    VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,

    /* 64-bit guest-state fields. */
    VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
    VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
    VMX_VMCS64_GUEST_DEBUGCTL_FULL,
    VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
    VMX_VMCS64_GUEST_PAT_FULL,
    VMX_VMCS64_GUEST_PAT_HIGH,
    VMX_VMCS64_GUEST_EFER_FULL,
    VMX_VMCS64_GUEST_EFER_HIGH,
    VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
    VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
    VMX_VMCS64_GUEST_PDPTE0_FULL,
    VMX_VMCS64_GUEST_PDPTE0_HIGH,
    VMX_VMCS64_GUEST_PDPTE1_FULL,
    VMX_VMCS64_GUEST_PDPTE1_HIGH,
    VMX_VMCS64_GUEST_PDPTE2_FULL,
    VMX_VMCS64_GUEST_PDPTE2_HIGH,
    VMX_VMCS64_GUEST_PDPTE3_FULL,
    VMX_VMCS64_GUEST_PDPTE3_HIGH,
    VMX_VMCS64_GUEST_BNDCFGS_FULL,
    VMX_VMCS64_GUEST_BNDCFGS_HIGH,
    VMX_VMCS64_GUEST_RTIT_CTL_FULL,
    VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
    VMX_VMCS64_GUEST_PKRS_FULL,
    VMX_VMCS64_GUEST_PKRS_HIGH,

    /* 64-bit host-state fields. */
    VMX_VMCS64_HOST_PAT_FULL,
    VMX_VMCS64_HOST_PAT_HIGH,
    VMX_VMCS64_HOST_EFER_FULL,
    VMX_VMCS64_HOST_EFER_HIGH,
    VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
    VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
    VMX_VMCS64_HOST_PKRS_FULL,
    VMX_VMCS64_HOST_PKRS_HIGH,

    /* 32-bit control fields. */
    VMX_VMCS32_CTRL_PIN_EXEC,
    VMX_VMCS32_CTRL_PROC_EXEC,
    VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
    VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
    VMX_VMCS32_CTRL_EXIT,
    VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
    VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
    VMX_VMCS32_CTRL_ENTRY,
    VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
    VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
    VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
    VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
    VMX_VMCS32_CTRL_TPR_THRESHOLD,
    VMX_VMCS32_CTRL_PROC_EXEC2,
    VMX_VMCS32_CTRL_PLE_GAP,
    VMX_VMCS32_CTRL_PLE_WINDOW,

    /* 32-bit read-only fields. */
    VMX_VMCS32_RO_VM_INSTR_ERROR,
    VMX_VMCS32_RO_EXIT_REASON,
    VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
    VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
    VMX_VMCS32_RO_IDT_VECTORING_INFO,
    VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
    VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
    VMX_VMCS32_RO_EXIT_INSTR_INFO,

    /* 32-bit guest-state fields. */
    VMX_VMCS32_GUEST_ES_LIMIT,
    VMX_VMCS32_GUEST_CS_LIMIT,
    VMX_VMCS32_GUEST_SS_LIMIT,
    VMX_VMCS32_GUEST_DS_LIMIT,
    VMX_VMCS32_GUEST_FS_LIMIT,
    VMX_VMCS32_GUEST_GS_LIMIT,
    VMX_VMCS32_GUEST_LDTR_LIMIT,
    VMX_VMCS32_GUEST_TR_LIMIT,
    VMX_VMCS32_GUEST_GDTR_LIMIT,
    VMX_VMCS32_GUEST_IDTR_LIMIT,
    VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
    VMX_VMCS32_GUEST_INT_STATE,
    VMX_VMCS32_GUEST_ACTIVITY_STATE,
    VMX_VMCS32_GUEST_SMBASE,
    VMX_VMCS32_GUEST_SYSENTER_CS,
    VMX_VMCS32_PREEMPT_TIMER_VALUE,

    /* 32-bit host-state fields. */
    VMX_VMCS32_HOST_SYSENTER_CS,

    /* Natural-width control fields. */
    VMX_VMCS_CTRL_CR0_MASK,
    VMX_VMCS_CTRL_CR4_MASK,
    VMX_VMCS_CTRL_CR0_READ_SHADOW,
    VMX_VMCS_CTRL_CR4_READ_SHADOW,
    VMX_VMCS_CTRL_CR3_TARGET_VAL0,
    VMX_VMCS_CTRL_CR3_TARGET_VAL1,
    VMX_VMCS_CTRL_CR3_TARGET_VAL2,
    VMX_VMCS_CTRL_CR3_TARGET_VAL3,

    /* Natural-width read-only data fields. */
    VMX_VMCS_RO_EXIT_QUALIFICATION,
    VMX_VMCS_RO_IO_RCX,
    VMX_VMCS_RO_IO_RSI,
    VMX_VMCS_RO_IO_RDI,
    VMX_VMCS_RO_IO_RIP,
    VMX_VMCS_RO_GUEST_LINEAR_ADDR,

    /* Natural-width guest-state fields. */
    VMX_VMCS_GUEST_CR0,
    VMX_VMCS_GUEST_CR3,
    VMX_VMCS_GUEST_CR4,
    VMX_VMCS_GUEST_ES_BASE,
    VMX_VMCS_GUEST_CS_BASE,
    VMX_VMCS_GUEST_SS_BASE,
    VMX_VMCS_GUEST_DS_BASE,
    VMX_VMCS_GUEST_FS_BASE,
    VMX_VMCS_GUEST_GS_BASE,
    VMX_VMCS_GUEST_LDTR_BASE,
    VMX_VMCS_GUEST_TR_BASE,
    VMX_VMCS_GUEST_GDTR_BASE,
    VMX_VMCS_GUEST_IDTR_BASE,
    VMX_VMCS_GUEST_DR7,
    VMX_VMCS_GUEST_RSP,
    VMX_VMCS_GUEST_RIP,
    VMX_VMCS_GUEST_RFLAGS,
    VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
    VMX_VMCS_GUEST_SYSENTER_ESP,
    VMX_VMCS_GUEST_SYSENTER_EIP,
    VMX_VMCS_GUEST_S_CET,
    VMX_VMCS_GUEST_SSP,
    VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,

    /* Natural-width host-state fields. */
    VMX_VMCS_HOST_CR0,
    VMX_VMCS_HOST_CR3,
    VMX_VMCS_HOST_CR4,
    VMX_VMCS_HOST_FS_BASE,
    VMX_VMCS_HOST_GS_BASE,
    VMX_VMCS_HOST_TR_BASE,
    VMX_VMCS_HOST_GDTR_BASE,
    VMX_VMCS_HOST_IDTR_BASE,
    VMX_VMCS_HOST_SYSENTER_ESP,
    VMX_VMCS_HOST_SYSENTER_EIP,
    VMX_VMCS_HOST_RSP,
    VMX_VMCS_HOST_RIP,
    VMX_VMCS_HOST_S_CET,
    VMX_VMCS_HOST_SSP,
    VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
};
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
#ifdef HMVMX_USE_FUNCTION_TABLE
/**
 * VMX_EXIT dispatch table.
 *
 * Indexed directly by the VMX_EXIT_XXX basic exit reason, so the entry order
 * must match the exit-reason numbering exactly -- do not reorder.  Reasons
 * that cannot legitimately occur route to vmxHCExitErrUnexpected; VMX
 * instructions raise \#UD via vmxHCExitSetPendingXcptUD when nested
 * hardware-virtualization support is compiled out.
 */
static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
{
    /*  0  VMX_EXIT_XCPT_OR_NMI             */  { vmxHCExitXcptOrNmi },
    /*  1  VMX_EXIT_EXT_INT                 */  { vmxHCExitExtInt },
    /*  2  VMX_EXIT_TRIPLE_FAULT            */  { vmxHCExitTripleFault },
    /*  3  VMX_EXIT_INIT_SIGNAL             */  { vmxHCExitErrUnexpected },
    /*  4  VMX_EXIT_SIPI                    */  { vmxHCExitErrUnexpected },
    /*  5  VMX_EXIT_IO_SMI                  */  { vmxHCExitErrUnexpected },
    /*  6  VMX_EXIT_SMI                     */  { vmxHCExitErrUnexpected },
    /*  7  VMX_EXIT_INT_WINDOW              */  { vmxHCExitIntWindow },
    /*  8  VMX_EXIT_NMI_WINDOW              */  { vmxHCExitNmiWindow },
    /*  9  VMX_EXIT_TASK_SWITCH             */  { vmxHCExitTaskSwitch },
    /* 10  VMX_EXIT_CPUID                   */  { vmxHCExitCpuid },
    /* 11  VMX_EXIT_GETSEC                  */  { vmxHCExitGetsec },
    /* 12  VMX_EXIT_HLT                     */  { vmxHCExitHlt },
    /* 13  VMX_EXIT_INVD                    */  { vmxHCExitInvd },
    /* 14  VMX_EXIT_INVLPG                  */  { vmxHCExitInvlpg },
    /* 15  VMX_EXIT_RDPMC                   */  { vmxHCExitRdpmc },
    /* 16  VMX_EXIT_RDTSC                   */  { vmxHCExitRdtsc },
    /* 17  VMX_EXIT_RSM                     */  { vmxHCExitErrUnexpected },
    /* 18  VMX_EXIT_VMCALL                  */  { vmxHCExitVmcall },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* 19  VMX_EXIT_VMCLEAR                 */  { vmxHCExitVmclear },
    /* 20  VMX_EXIT_VMLAUNCH                */  { vmxHCExitVmlaunch },
    /* 21  VMX_EXIT_VMPTRLD                 */  { vmxHCExitVmptrld },
    /* 22  VMX_EXIT_VMPTRST                 */  { vmxHCExitVmptrst },
    /* 23  VMX_EXIT_VMREAD                  */  { vmxHCExitVmread },
    /* 24  VMX_EXIT_VMRESUME                */  { vmxHCExitVmresume },
    /* 25  VMX_EXIT_VMWRITE                 */  { vmxHCExitVmwrite },
    /* 26  VMX_EXIT_VMXOFF                  */  { vmxHCExitVmxoff },
    /* 27  VMX_EXIT_VMXON                   */  { vmxHCExitVmxon },
#else
    /* 19  VMX_EXIT_VMCLEAR                 */  { vmxHCExitSetPendingXcptUD },
    /* 20  VMX_EXIT_VMLAUNCH                */  { vmxHCExitSetPendingXcptUD },
    /* 21  VMX_EXIT_VMPTRLD                 */  { vmxHCExitSetPendingXcptUD },
    /* 22  VMX_EXIT_VMPTRST                 */  { vmxHCExitSetPendingXcptUD },
    /* 23  VMX_EXIT_VMREAD                  */  { vmxHCExitSetPendingXcptUD },
    /* 24  VMX_EXIT_VMRESUME                */  { vmxHCExitSetPendingXcptUD },
    /* 25  VMX_EXIT_VMWRITE                 */  { vmxHCExitSetPendingXcptUD },
    /* 26  VMX_EXIT_VMXOFF                  */  { vmxHCExitSetPendingXcptUD },
    /* 27  VMX_EXIT_VMXON                   */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 28  VMX_EXIT_MOV_CRX                 */  { vmxHCExitMovCRx },
    /* 29  VMX_EXIT_MOV_DRX                 */  { vmxHCExitMovDRx },
    /* 30  VMX_EXIT_IO_INSTR                */  { vmxHCExitIoInstr },
    /* 31  VMX_EXIT_RDMSR                   */  { vmxHCExitRdmsr },
    /* 32  VMX_EXIT_WRMSR                   */  { vmxHCExitWrmsr },
    /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  { vmxHCExitErrInvalidGuestState },
    /* 34  VMX_EXIT_ERR_MSR_LOAD            */  { vmxHCExitErrUnexpected },
    /* 35  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 36  VMX_EXIT_MWAIT                   */  { vmxHCExitMwait },
    /* 37  VMX_EXIT_MTF                     */  { vmxHCExitMtf },
    /* 38  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 39  VMX_EXIT_MONITOR                 */  { vmxHCExitMonitor },
    /* 40  VMX_EXIT_PAUSE                   */  { vmxHCExitPause },
    /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  { vmxHCExitErrUnexpected },
    /* 42  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  { vmxHCExitTprBelowThreshold },
    /* 44  VMX_EXIT_APIC_ACCESS             */  { vmxHCExitApicAccess },
    /* 45  VMX_EXIT_VIRTUALIZED_EOI         */  { vmxHCExitErrUnexpected },
    /* 46  VMX_EXIT_GDTR_IDTR_ACCESS        */  { vmxHCExitErrUnexpected },
    /* 47  VMX_EXIT_LDTR_TR_ACCESS          */  { vmxHCExitErrUnexpected },
    /* 48  VMX_EXIT_EPT_VIOLATION           */  { vmxHCExitEptViolation },
    /* 49  VMX_EXIT_EPT_MISCONFIG           */  { vmxHCExitEptMisconfig },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    /* 50  VMX_EXIT_INVEPT                  */  { vmxHCExitInvept },
#else
    /* 50  VMX_EXIT_INVEPT                  */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 51  VMX_EXIT_RDTSCP                  */  { vmxHCExitRdtscp },
    /* 52  VMX_EXIT_PREEMPT_TIMER           */  { vmxHCExitPreemptTimer },
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* 53  VMX_EXIT_INVVPID                 */  { vmxHCExitInvvpid },
#else
    /* 53  VMX_EXIT_INVVPID                 */  { vmxHCExitSetPendingXcptUD },
#endif
    /* 54  VMX_EXIT_WBINVD                  */  { vmxHCExitWbinvd },
    /* 55  VMX_EXIT_XSETBV                  */  { vmxHCExitXsetbv },
    /* 56  VMX_EXIT_APIC_WRITE              */  { vmxHCExitErrUnexpected },
    /* 57  VMX_EXIT_RDRAND                  */  { vmxHCExitErrUnexpected },
    /* 58  VMX_EXIT_INVPCID                 */  { vmxHCExitInvpcid },
    /* 59  VMX_EXIT_VMFUNC                  */  { vmxHCExitErrUnexpected },
    /* 60  VMX_EXIT_ENCLS                   */  { vmxHCExitErrUnexpected },
    /* 61  VMX_EXIT_RDSEED                  */  { vmxHCExitErrUnexpected },
    /* 62  VMX_EXIT_PML_FULL                */  { vmxHCExitErrUnexpected },
    /* 63  VMX_EXIT_XSAVES                  */  { vmxHCExitErrUnexpected },
    /* 64  VMX_EXIT_XRSTORS                 */  { vmxHCExitErrUnexpected },
    /* 65  UNDEFINED                        */  { vmxHCExitErrUnexpected },
    /* 66  VMX_EXIT_SPP_EVENT               */  { vmxHCExitErrUnexpected },
    /* 67  VMX_EXIT_UMWAIT                  */  { vmxHCExitErrUnexpected },
    /* 68  VMX_EXIT_TPAUSE                  */  { vmxHCExitErrUnexpected },
    /* 69  VMX_EXIT_LOADIWKEY               */  { vmxHCExitErrUnexpected },
};
#endif /* HMVMX_USE_FUNCTION_TABLE */
675
#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
/**
 * Human-readable VM-instruction error strings, indexed by the
 * VM-instruction error number read from the VMCS (0..HMVMX_INSTR_ERROR_MAX).
 * Only compiled into strict builds with logging enabled.
 */
static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
{
    /*  0 */ "(Not Used)",
    /*  1 */ "VMCALL executed in VMX root operation.",
    /*  2 */ "VMCLEAR with invalid physical address.",
    /*  3 */ "VMCLEAR with VMXON pointer.",
    /*  4 */ "VMLAUNCH with non-clear VMCS.",
    /*  5 */ "VMRESUME with non-launched VMCS.",
    /*  6 */ "VMRESUME after VMXOFF",
    /*  7 */ "VM-entry with invalid control fields.",
    /*  8 */ "VM-entry with invalid host state fields.",
    /*  9 */ "VMPTRLD with invalid physical address.",
    /* 10 */ "VMPTRLD with VMXON pointer.",
    /* 11 */ "VMPTRLD with incorrect revision identifier.",
    /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    /* 13 */ "VMWRITE to read-only VMCS component.",
    /* 14 */ "(Not Used)",
    /* 15 */ "VMXON executed in VMX root operation.",
    /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
    /* 17 */ "VM-entry with non-launched executing VMCS.",
    /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
    /* 19 */ "VMCALL with non-clear VMCS.",
    /* 20 */ "VMCALL with invalid VM-exit control fields.",
    /* 21 */ "(Not Used)",
    /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    /* 24 */ "VMCALL with invalid SMM-monitor features.",
    /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
    /* 26 */ "VM-entry with events blocked by MOV SS.",
    /* 27 */ "(Not Used)",
    /* 28 */ "Invalid operand to INVEPT/INVVPID."
};
#endif /* VBOX_STRICT && LOG_ENABLED */
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically does not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically does not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation is allowed. For e.g., PCIDE is excluded because the behavior
769 * depends on CR3 which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
839
840
841/**
842 * Remove one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Remove an exceptions from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use a different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry exception error code field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call directly unless the it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequences w/o any conditionals between in
1234 * non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, the "IA-32e mode guest" control we initialize with what is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set the "save debug controls" as the converse
1548 * "load debug controls" is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related internal eflags
1700 * inhibition state.
1701 *
1702 * @returns Guest's interruptibility-state.
1703 * @param pVCpu The cross context virtual CPU structure.
1704 *
1705 * @remarks No-long-jump zone!!!
1706 */
1707static uint32_t vmxHCGetGuestIntrStateWithUpdate(PVMCPUCC pVCpu)
1708{
1709 uint32_t fIntrState;
1710
1711 /*
1712 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1713 */
1714 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1715 fIntrState = 0;
1716 else
1717 {
1718 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1719 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1720
1721 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1722 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1723 else
1724 {
1725 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1726
1727 /* Block-by-STI must not be set when interrupts are disabled. */
1728 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1729 }
1730 }
1731
1732 /*
1733 * Check if we should inhibit NMI delivery.
1734 */
1735 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1736 { /* likely */ }
1737 else
1738 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1739
1740 /*
1741 * Validate.
1742 */
1743 /* We don't support block-by-SMI yet.*/
1744 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1745
1746 return fIntrState;
1747}
1748
1749
1750/**
1751 * Exports the exception intercepts required for guest execution in the VMCS.
1752 *
1753 * @param pVCpu The cross context virtual CPU structure.
1754 * @param pVmxTransient The VMX-transient structure.
1755 *
1756 * @remarks No-long-jump zone!!!
1757 */
1758static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1759{
1760 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1761 {
1762 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1763 if ( !pVmxTransient->fIsNestedGuest
1764 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1765 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1766 else
1767 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1768
1769 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1771 }
1772}
1773
1774
1775/**
1776 * Exports the guest's RIP into the guest-state area in the VMCS.
1777 *
1778 * @param pVCpu The cross context virtual CPU structure.
1779 *
1780 * @remarks No-long-jump zone!!!
1781 */
1782static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1783{
1784 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1785 {
1786 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1787
1788 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1789 AssertRC(rc);
1790
1791 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1792 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1793 }
1794}
1795
1796
1797/**
1798 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1799 *
1800 * @param pVCpu The cross context virtual CPU structure.
1801 * @param pVmxTransient The VMX-transient structure.
1802 *
1803 * @remarks No-long-jump zone!!!
1804 */
1805static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1806{
1807 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1808 {
1809 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1810
1811 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1812 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1813 to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1814 Use 32-bit VMWRITE. */
1815 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1816 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1817 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1818
1819#ifndef IN_NEM_DARWIN
1820 /*
1821 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1822 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1823 * can run the real-mode guest code under Virtual 8086 mode.
1824 */
1825 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1826 if (pVmcsInfo->RealMode.fRealOnV86Active)
1827 {
1828 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1829 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1830 Assert(!pVmxTransient->fIsNestedGuest);
1831 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1832 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1833 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1834 }
1835#else
1836 RT_NOREF(pVmxTransient);
1837#endif
1838
1839 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1840 AssertRC(rc);
1841
1842 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1843 Log4Func(("eflags=%#RX32\n", fEFlags));
1844 }
1845}
1846
1847
1848#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1849/**
1850 * Copies the nested-guest VMCS to the shadow VMCS.
1851 *
1852 * @returns VBox status code.
1853 * @param pVCpu The cross context virtual CPU structure.
1854 * @param pVmcsInfo The VMCS info. object.
1855 *
1856 * @remarks No-long-jump zone!!!
1857 */
1858static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1859{
1860 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1861 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1862
1863 /*
1864 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1865 * current VMCS, as we may try saving guest lazy MSRs.
1866 *
1867 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1868 * calling the import VMCS code which is currently performing the guest MSR reads
1869 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1870 * and the rest of the VMX leave session machinery.
1871 */
1872 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1873
1874 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1875 if (RT_SUCCESS(rc))
1876 {
1877 /*
1878 * Copy all guest read/write VMCS fields.
1879 *
1880 * We don't check for VMWRITE failures here for performance reasons and
1881 * because they are not expected to fail, barring irrecoverable conditions
1882 * like hardware errors.
1883 */
1884 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1885 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1886 {
1887 uint64_t u64Val;
1888 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1889 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1890 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1891 }
1892
1893 /*
1894 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1895 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1896 */
1897 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1898 {
1899 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1900 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1901 {
1902 uint64_t u64Val;
1903 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1904 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1905 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1906 }
1907 }
1908
1909 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1910 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1911 }
1912
1913 ASMSetFlags(fEFlags);
1914 return rc;
1915}
1916
1917
1918/**
1919 * Copies the shadow VMCS to the nested-guest VMCS.
1920 *
1921 * @returns VBox status code.
1922 * @param pVCpu The cross context virtual CPU structure.
1923 * @param pVmcsInfo The VMCS info. object.
1924 *
1925 * @remarks Called with interrupts disabled.
1926 */
1927static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1928{
1929 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1930 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1931 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1932
1933 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1934 if (RT_SUCCESS(rc))
1935 {
1936 /*
1937 * Copy guest read/write fields from the shadow VMCS.
1938 * Guest read-only fields cannot be modified, so no need to copy them.
1939 *
1940 * We don't check for VMREAD failures here for performance reasons and
1941 * because they are not expected to fail, barring irrecoverable conditions
1942 * like hardware errors.
1943 */
1944 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1945 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1946 {
1947 uint64_t u64Val;
1948 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1949 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1950 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1951 }
1952
1953 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1954 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1955 }
1956 return rc;
1957}
1958
1959
1960/**
1961 * Enables VMCS shadowing for the given VMCS info. object.
1962 *
1963 * @param pVCpu The cross context virtual CPU structure.
1964 * @param pVmcsInfo The VMCS info. object.
1965 *
1966 * @remarks No-long-jump zone!!!
1967 */
1968static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1969{
1970 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1971 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1972 {
1973 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1974 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1975 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1976 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1977 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1978 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1979 Log4Func(("Enabled\n"));
1980 }
1981}
1982
1983
1984/**
1985 * Disables VMCS shadowing for the given VMCS info. object.
1986 *
1987 * @param pVCpu The cross context virtual CPU structure.
1988 * @param pVmcsInfo The VMCS info. object.
1989 *
1990 * @remarks No-long-jump zone!!!
1991 */
1992static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1993{
1994 /*
1995 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1996 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1997 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1998 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1999 *
2000 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2001 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2002 */
2003 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2004 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2005 {
2006 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2007 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2008 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2009 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2010 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2011 Log4Func(("Disabled\n"));
2012 }
2013}
2014#endif
2015
2016
2017/**
2018 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2019 *
2020 * The guest FPU state is always pre-loaded hence we don't need to bother about
2021 * sharing FPU related CR0 bits between the guest and host.
2022 *
2023 * @returns VBox status code.
2024 * @param pVCpu The cross context virtual CPU structure.
2025 * @param pVmxTransient The VMX-transient structure.
2026 *
2027 * @remarks No-long-jump zone!!!
2028 */
2029static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2030{
2031 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2032 {
2033 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2034 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2035
2036 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2037 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2038 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2039 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2040 else
2041 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2042
2043 if (!pVmxTransient->fIsNestedGuest)
2044 {
2045 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2046 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2047 uint64_t const u64ShadowCr0 = u64GuestCr0;
2048 Assert(!RT_HI_U32(u64GuestCr0));
2049
2050 /*
2051 * Setup VT-x's view of the guest CR0.
2052 */
2053 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2054 if (VM_IS_VMX_NESTED_PAGING(pVM))
2055 {
2056#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2057 if (CPUMIsGuestPagingEnabled(pVCpu))
2058 {
2059 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2060 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2061 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2062 }
2063 else
2064 {
2065 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2066 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2067 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2068 }
2069
2070 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2071 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2072 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2073#endif
2074 }
2075 else
2076 {
2077 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2078 u64GuestCr0 |= X86_CR0_WP;
2079 }
2080
2081 /*
2082 * Guest FPU bits.
2083 *
2084 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2085 * using CR0.TS.
2086 *
2087 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
2088 * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
2089 */
2090 u64GuestCr0 |= X86_CR0_NE;
2091
2092 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2093 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2094
2095 /*
2096 * Update exception intercepts.
2097 */
2098 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2099#ifndef IN_NEM_DARWIN
2100 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2101 {
2102 Assert(PDMVmmDevHeapIsEnabled(pVM));
2103 Assert(pVM->hm.s.vmx.pRealModeTSS);
2104 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2105 }
2106 else
2107#endif
2108 {
2109 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2110 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2111 if (fInterceptMF)
2112 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2113 }
2114
2115 /* Additional intercepts for debugging, define these yourself explicitly. */
2116#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2117 uXcptBitmap |= 0
2118 | RT_BIT(X86_XCPT_BP)
2119 | RT_BIT(X86_XCPT_DE)
2120 | RT_BIT(X86_XCPT_NM)
2121 | RT_BIT(X86_XCPT_TS)
2122 | RT_BIT(X86_XCPT_UD)
2123 | RT_BIT(X86_XCPT_NP)
2124 | RT_BIT(X86_XCPT_SS)
2125 | RT_BIT(X86_XCPT_GP)
2126 | RT_BIT(X86_XCPT_PF)
2127 | RT_BIT(X86_XCPT_MF)
2128 ;
2129#elif defined(HMVMX_ALWAYS_TRAP_PF)
2130 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2131#endif
2132 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2134 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2136 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2137
2138 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2139 u64GuestCr0 |= fSetCr0;
2140 u64GuestCr0 &= fZapCr0;
2141 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2142
2143 Assert(!RT_HI_U32(u64GuestCr0));
2144 Assert(u64GuestCr0 & X86_CR0_NE);
2145
2146 /* Commit the CR0 and related fields to the guest VMCS. */
2147 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2148 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2149 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2150 {
2151 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2152 AssertRC(rc);
2153 }
2154 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2155 {
2156 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2157 AssertRC(rc);
2158 }
2159
2160 /* Update our caches. */
2161 pVmcsInfo->u32ProcCtls = uProcCtls;
2162 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2163
2164 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2165 }
2166 else
2167 {
2168 /*
2169 * With nested-guests, we may have extended the guest/host mask here since we
2170 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2171 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2172 * originally supplied. We must copy those bits from the nested-guest CR0 into
2173 * the nested-guest CR0 read-shadow.
2174 */
2175 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2176 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2177 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2178
2179 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2180 u64GuestCr0 |= fSetCr0;
2181 u64GuestCr0 &= fZapCr0;
2182 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2183
2184 Assert(!RT_HI_U32(u64GuestCr0));
2185 Assert(u64GuestCr0 & X86_CR0_NE);
2186
2187 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2188 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2189 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2190
2191 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2192 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2193 }
2194
2195 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2196 }
2197
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Exports the guest control registers (CR3, CR4) into the guest-state area
2204 * in the VMCS.
2205 *
2206 * @returns VBox strict status code.
2207 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2208 * without unrestricted guest access and the VMMDev is not presently
2209 * mapped (e.g. EFI32).
2210 *
2211 * @param pVCpu The cross context virtual CPU structure.
2212 * @param pVmxTransient The VMX-transient structure.
2213 *
2214 * @remarks No-long-jump zone!!!
2215 */
2216static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2217{
2218 int rc = VINF_SUCCESS;
2219 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2220
2221 /*
2222 * Guest CR2.
2223 * It's always loaded in the assembler code. Nothing to do here.
2224 */
2225
2226 /*
2227 * Guest CR3.
2228 */
2229 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2230 {
2231 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2232
2233 if (VM_IS_VMX_NESTED_PAGING(pVM))
2234 {
2235#ifndef IN_NEM_DARWIN
2236 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2237 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2238
2239 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2240 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2241 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2242 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2243
2244 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2245 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2246 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2247
2248 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2249 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2250 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2251 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2253 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2254 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2255
2256 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2257 AssertRC(rc);
2258#endif
2259
2260 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2261 uint64_t u64GuestCr3 = pCtx->cr3;
2262 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2263 || CPUMIsGuestPagingEnabledEx(pCtx))
2264 {
2265 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2266 if (CPUMIsGuestInPAEModeEx(pCtx))
2267 {
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2271 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2272 }
2273
2274 /*
2275 * The guest's view of its CR3 is unblemished with nested paging when the
2276 * guest is using paging or we have unrestricted guest execution to handle
2277 * the guest when it's not using paging.
2278 */
2279 }
2280#ifndef IN_NEM_DARWIN
2281 else
2282 {
2283 /*
2284 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2285 * thinks it accesses physical memory directly, we use our identity-mapped
2286 * page table to map guest-linear to guest-physical addresses. EPT takes care
2287 * of translating it to host-physical addresses.
2288 */
2289 RTGCPHYS GCPhys;
2290 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2291
2292 /* We obtain it here every time as the guest could have relocated this PCI region. */
2293 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2294 if (RT_SUCCESS(rc))
2295 { /* likely */ }
2296 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2297 {
2298 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2299 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2300 }
2301 else
2302 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2303
2304 u64GuestCr3 = GCPhys;
2305 }
2306#endif
2307
2308 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2309 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2310 AssertRC(rc);
2311 }
2312 else
2313 {
2314 Assert(!pVmxTransient->fIsNestedGuest);
2315 /* Non-nested paging case, just use the hypervisor's CR3. */
2316 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2317
2318 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2319 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2320 AssertRC(rc);
2321 }
2322
2323 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2324 }
2325
2326 /*
2327 * Guest CR4.
2328 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2329 */
2330 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2331 {
2332 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2333 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2334
2335 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2336 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2337
2338 /*
2339 * With nested-guests, we may have extended the guest/host mask here (since we
2340 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2341 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2342 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2343 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2344 */
2345 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2346 uint64_t u64GuestCr4 = pCtx->cr4;
2347 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2348 ? pCtx->cr4
2349 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2350 Assert(!RT_HI_U32(u64GuestCr4));
2351
2352#ifndef IN_NEM_DARWIN
2353 /*
2354 * Setup VT-x's view of the guest CR4.
2355 *
2356 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2357 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2358 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2359 *
2360 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2361 */
2362 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2363 {
2364 Assert(pVM->hm.s.vmx.pRealModeTSS);
2365 Assert(PDMVmmDevHeapIsEnabled(pVM));
2366 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2367 }
2368#endif
2369
2370 if (VM_IS_VMX_NESTED_PAGING(pVM))
2371 {
2372 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2373 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2374 {
2375 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2376 u64GuestCr4 |= X86_CR4_PSE;
2377 /* Our identity mapping is a 32-bit page directory. */
2378 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2379 }
2380 /* else use guest CR4.*/
2381 }
2382 else
2383 {
2384 Assert(!pVmxTransient->fIsNestedGuest);
2385
2386 /*
2387 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2388 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2389 */
2390 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2391 {
2392 case PGMMODE_REAL: /* Real-mode. */
2393 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2394 case PGMMODE_32_BIT: /* 32-bit paging. */
2395 {
2396 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2397 break;
2398 }
2399
2400 case PGMMODE_PAE: /* PAE paging. */
2401 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2402 {
2403 u64GuestCr4 |= X86_CR4_PAE;
2404 break;
2405 }
2406
2407 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2408 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2409 {
2410#ifdef VBOX_WITH_64_BITS_GUESTS
2411 /* For our assumption in vmxHCShouldSwapEferMsr. */
2412 Assert(u64GuestCr4 & X86_CR4_PAE);
2413 break;
2414#endif
2415 }
2416 default:
2417 AssertFailed();
2418 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2419 }
2420 }
2421
2422 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2423 u64GuestCr4 |= fSetCr4;
2424 u64GuestCr4 &= fZapCr4;
2425
2426 Assert(!RT_HI_U32(u64GuestCr4));
2427 Assert(u64GuestCr4 & X86_CR4_VMXE);
2428
2429 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2431 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2432
2433#ifndef IN_NEM_DARWIN
2434 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2435 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2436 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2437 {
2438 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2439 hmR0VmxUpdateStartVmFunction(pVCpu);
2440 }
2441#endif
2442
2443 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2444
2445 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2446 }
2447 return rc;
2448}
2449
2450
#ifdef VBOX_STRICT
/**
 * Strict function to validate segment registers.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS info. object.
 *
 * @remarks Will import guest CR0 on strict builds during validation of
 *          segments.
 */
static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    /*
     * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
     *
     * The reason we check for attribute value 0 in this function and not just the unusable bit is
     * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
     * unusable bit and doesn't change the guest-context value.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
    if (   !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
        && (   !CPUMIsGuestInRealModeEx(pCtx)
            && !CPUMIsGuestInV86ModeEx(pCtx)))
    {
        /* Protected mode checks */
        /* CS */
        Assert(pCtx->cs.Attr.n.u1Present);
        Assert(!(pCtx->cs.Attr.u & 0xf00));
        Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
        Assert(   (pCtx->cs.u32Limit & 0xfff) == 0xfff
               || !(pCtx->cs.Attr.n.u1Granularity));
        Assert(   !(pCtx->cs.u32Limit & 0xfff00000)
               || (pCtx->cs.Attr.n.u1Granularity));
        /* CS cannot be loaded with NULL in protected mode. */
        Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
        if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
            Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
        else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
            Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
        else
            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type)); /* Log the type the message refers to, not the DPL. */
        /* SS */
        Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
        Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
        if (   !(pCtx->cr0 & X86_CR0_PE)
            || pCtx->cs.Attr.n.u4Type == 3)
        {
            Assert(!pCtx->ss.Attr.n.u2Dpl);
        }
        if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
            Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
            Assert(pCtx->ss.Attr.n.u1Present);
            Assert(!(pCtx->ss.Attr.u & 0xf00));
            Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
            Assert(   (pCtx->ss.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->ss.Attr.n.u1Granularity));
            Assert(   !(pCtx->ss.u32Limit & 0xfff00000)
                   || (pCtx->ss.Attr.n.u1Granularity));
        }
        /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
        if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->ds.Attr.n.u1Present);
            Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
            Assert(!(pCtx->ds.Attr.u & 0xf00));
            Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
            Assert(   (pCtx->ds.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->ds.Attr.n.u1Granularity));
            Assert(   !(pCtx->ds.u32Limit & 0xfff00000)
                   || (pCtx->ds.Attr.n.u1Granularity));
            Assert(   !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->es.Attr.n.u1Present);
            Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
            Assert(!(pCtx->es.Attr.u & 0xf00));
            Assert(!(pCtx->es.Attr.u & 0xfffe0000));
            Assert(   (pCtx->es.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->es.Attr.n.u1Granularity));
            Assert(   !(pCtx->es.u32Limit & 0xfff00000)
                   || (pCtx->es.Attr.n.u1Granularity));
            Assert(   !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->fs.Attr.n.u1Present);
            Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
            Assert(!(pCtx->fs.Attr.u & 0xf00));
            Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
            Assert(   (pCtx->fs.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->fs.Attr.n.u1Granularity));
            Assert(   !(pCtx->fs.u32Limit & 0xfff00000)
                   || (pCtx->fs.Attr.n.u1Granularity));
            Assert(   !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
        {
            Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
            Assert(pCtx->gs.Attr.n.u1Present);
            Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
            Assert(!(pCtx->gs.Attr.u & 0xf00));
            Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
            Assert(   (pCtx->gs.u32Limit & 0xfff) == 0xfff
                   || !(pCtx->gs.Attr.n.u1Granularity));
            Assert(   !(pCtx->gs.u32Limit & 0xfff00000)
                   || (pCtx->gs.Attr.n.u1Granularity));
            Assert(   !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
                   || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
        }
        /* 64-bit capable CPUs. */
        Assert(!RT_HI_U32(pCtx->cs.u64Base));
        Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
        Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
        Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
    }
    else if (   CPUMIsGuestInV86ModeEx(pCtx)
             || (   CPUMIsGuestInRealModeEx(pCtx)
                 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
    {
        /* Real and v86 mode checks. */
        /* vmxHCExportGuestSegReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
        uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
#ifndef IN_NEM_DARWIN
        if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
        {
            u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
            u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
        }
        else
#endif
        {
            u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
            u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
        }

        /* CS */
        /* Note: u64Base is 64-bit wide, so it must be logged with %RX64 and not %x. */
        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
        Assert(pCtx->cs.u32Limit == 0xffff);
        AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
        /* SS */
        Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
        Assert(pCtx->ss.u32Limit == 0xffff);
        Assert(u32SSAttr == 0xf3);
        /* DS */
        Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
        Assert(pCtx->ds.u32Limit == 0xffff);
        Assert(u32DSAttr == 0xf3);
        /* ES */
        Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
        Assert(pCtx->es.u32Limit == 0xffff);
        Assert(u32ESAttr == 0xf3);
        /* FS */
        Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
        Assert(pCtx->fs.u32Limit == 0xffff);
        Assert(u32FSAttr == 0xf3);
        /* GS */
        Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
        Assert(pCtx->gs.u32Limit == 0xffff);
        Assert(u32GSAttr == 0xf3);
        /* 64-bit capable CPUs. */
        Assert(!RT_HI_U32(pCtx->cs.u64Base));
        Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
        Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
        Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
    }
}
#endif /* VBOX_STRICT */
2629
2630
2631/**
2632 * Exports a guest segment register into the guest-state area in the VMCS.
2633 *
2634 * @returns VBox status code.
2635 * @param pVCpu The cross context virtual CPU structure.
2636 * @param pVmcsInfo The VMCS info. object.
2637 * @param iSegReg The segment register number (X86_SREG_XXX).
2638 * @param pSelReg Pointer to the segment selector.
2639 *
2640 * @remarks No-long-jump zone!!!
2641 */
2642static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2643{
2644 Assert(iSegReg < X86_SREG_COUNT);
2645
2646 uint32_t u32Access = pSelReg->Attr.u;
2647#ifndef IN_NEM_DARWIN
2648 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2649#endif
2650 {
2651 /*
2652 * The way to differentiate between whether this is really a null selector or was just
2653 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2654 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2655 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures
2656 * NULL selectors loaded in protected-mode have their attribute as 0.
2657 */
2658 if (u32Access)
2659 { }
2660 else
2661 u32Access = X86DESCATTR_UNUSABLE;
2662 }
2663#ifndef IN_NEM_DARWIN
2664 else
2665 {
2666 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2667 u32Access = 0xf3;
2668 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2669 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2670 RT_NOREF_PV(pVCpu);
2671 }
2672#else
2673 RT_NOREF(pVmcsInfo);
2674#endif
2675
2676 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2677 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2678 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2679
2680 /*
2681 * Commit it to the VMCS.
2682 */
2683 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2686 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2687 return VINF_SUCCESS;
2688}
2689
2690
2691/**
2692 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2693 * area in the VMCS.
2694 *
2695 * @returns VBox status code.
2696 * @param pVCpu The cross context virtual CPU structure.
2697 * @param pVmxTransient The VMX-transient structure.
2698 *
2699 * @remarks Will import guest CR0 on strict builds during validation of
2700 * segments.
2701 * @remarks No-long-jump zone!!!
2702 */
2703static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2704{
2705 int rc = VERR_INTERNAL_ERROR_5;
2706#ifndef IN_NEM_DARWIN
2707 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2708#endif
2709 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2710 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2711#ifndef IN_NEM_DARWIN
2712 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2713#endif
2714
2715 /*
2716 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2717 */
2718 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2719 {
2720 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2721 {
2722 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2723#ifndef IN_NEM_DARWIN
2724 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2725 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2726#endif
2727 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2728 AssertRC(rc);
2729 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2730 }
2731
2732 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2733 {
2734 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2735#ifndef IN_NEM_DARWIN
2736 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2737 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2738#endif
2739 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2740 AssertRC(rc);
2741 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2742 }
2743
2744 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2745 {
2746 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2747#ifndef IN_NEM_DARWIN
2748 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2749 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2750#endif
2751 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2752 AssertRC(rc);
2753 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2754 }
2755
2756 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2757 {
2758 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2759#ifndef IN_NEM_DARWIN
2760 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2761 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2762#endif
2763 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2764 AssertRC(rc);
2765 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2766 }
2767
2768 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2769 {
2770 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2771#ifndef IN_NEM_DARWIN
2772 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2773 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2774#endif
2775 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2776 AssertRC(rc);
2777 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2778 }
2779
2780 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2781 {
2782 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2783#ifndef IN_NEM_DARWIN
2784 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2785 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2786#endif
2787 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2788 AssertRC(rc);
2789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2790 }
2791
2792#ifdef VBOX_STRICT
2793 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2794#endif
2795 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2796 pCtx->cs.Attr.u));
2797 }
2798
2799 /*
2800 * Guest TR.
2801 */
2802 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2803 {
2804 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2805
2806 /*
2807 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2808 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2809 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2810 */
2811 uint16_t u16Sel;
2812 uint32_t u32Limit;
2813 uint64_t u64Base;
2814 uint32_t u32AccessRights;
2815#ifndef IN_NEM_DARWIN
2816 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2817#endif
2818 {
2819 u16Sel = pCtx->tr.Sel;
2820 u32Limit = pCtx->tr.u32Limit;
2821 u64Base = pCtx->tr.u64Base;
2822 u32AccessRights = pCtx->tr.Attr.u;
2823 }
2824#ifndef IN_NEM_DARWIN
2825 else
2826 {
2827 Assert(!pVmxTransient->fIsNestedGuest);
2828 Assert(pVM->hm.s.vmx.pRealModeTSS);
2829 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2830
2831 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2832 RTGCPHYS GCPhys;
2833 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2834 AssertRCReturn(rc, rc);
2835
2836 X86DESCATTR DescAttr;
2837 DescAttr.u = 0;
2838 DescAttr.n.u1Present = 1;
2839 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2840
2841 u16Sel = 0;
2842 u32Limit = HM_VTX_TSS_SIZE;
2843 u64Base = GCPhys;
2844 u32AccessRights = DescAttr.u;
2845 }
2846#endif
2847
2848 /* Validate. */
2849 Assert(!(u16Sel & RT_BIT(2)));
2850 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2851 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2852 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2853 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2854 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2855 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2856 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2857 Assert( (u32Limit & 0xfff) == 0xfff
2858 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2859 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2860 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2861
2862 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2865 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2866
2867 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2868 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2869 }
2870
2871 /*
2872 * Guest GDTR.
2873 */
2874 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2875 {
2876 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2877
2878 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2879 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2880
2881 /* Validate. */
2882 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2883
2884 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2885 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2886 }
2887
2888 /*
2889 * Guest LDTR.
2890 */
2891 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2892 {
2893 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2894
2895 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2896 uint32_t u32Access;
2897 if ( !pVmxTransient->fIsNestedGuest
2898 && !pCtx->ldtr.Attr.u)
2899 u32Access = X86DESCATTR_UNUSABLE;
2900 else
2901 u32Access = pCtx->ldtr.Attr.u;
2902
2903 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2906 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2907
2908 /* Validate. */
2909 if (!(u32Access & X86DESCATTR_UNUSABLE))
2910 {
2911 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2912 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2913 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2914 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2915 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2916 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2917 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2918 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2919 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2920 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2921 }
2922
2923 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2924 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2925 }
2926
2927 /*
2928 * Guest IDTR.
2929 */
2930 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2931 {
2932 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2933
2934 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2935 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2936
2937 /* Validate. */
2938 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2939
2940 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2941 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2942 }
2943
2944 return VINF_SUCCESS;
2945}
2946
2947
2948/**
2949 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2950 * VM-exit interruption info type.
2951 *
2952 * @returns The IEM exception flags.
2953 * @param uVector The event vector.
2954 * @param uVmxEventType The VMX event type.
2955 *
2956 * @remarks This function currently only constructs flags required for
2957 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
2958 * and CR2 aspects of an exception are not included).
2959 */
2960static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2961{
2962 uint32_t fIemXcptFlags;
2963 switch (uVmxEventType)
2964 {
2965 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2966 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2967 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2968 break;
2969
2970 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2971 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2972 break;
2973
2974 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2975 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2976 break;
2977
2978 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2979 {
2980 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2981 if (uVector == X86_XCPT_BP)
2982 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2983 else if (uVector == X86_XCPT_OF)
2984 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2985 else
2986 {
2987 fIemXcptFlags = 0;
2988 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2989 }
2990 break;
2991 }
2992
2993 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2994 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2995 break;
2996
2997 default:
2998 fIemXcptFlags = 0;
2999 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3000 break;
3001 }
3002 return fIemXcptFlags;
3003}
3004
3005
3006/**
3007 * Sets an event as a pending event to be injected into the guest.
3008 *
3009 * @param pVCpu The cross context virtual CPU structure.
3010 * @param u32IntInfo The VM-entry interruption-information field.
3011 * @param cbInstr The VM-entry instruction length in bytes (for
3012 * software interrupts, exceptions and privileged
3013 * software exceptions).
3014 * @param u32ErrCode The VM-entry exception error code.
3015 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3016 * page-fault.
3017 */
3018DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3019 RTGCUINTPTR GCPtrFaultAddress)
3020{
3021 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3022 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3024 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3025 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3026 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3027}
3028
3029
3030/**
3031 * Sets an external interrupt as pending-for-injection into the VM.
3032 *
3033 * @param pVCpu The cross context virtual CPU structure.
3034 * @param u8Interrupt The external interrupt vector.
3035 */
3036DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3037{
3038 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3041 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3042 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3043 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3044}
3045
3046
3047/**
3048 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3049 *
3050 * @param pVCpu The cross context virtual CPU structure.
3051 */
3052DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3053{
3054 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3057 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3058 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3059 Log4Func(("NMI pending injection\n"));
3060}
3061
3062
3063/**
3064 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3065 *
3066 * @param pVCpu The cross context virtual CPU structure.
3067 */
3068DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3069{
3070 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3072 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3073 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3074 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3075}
3076
3077
3078/**
3079 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3080 *
3081 * @param pVCpu The cross context virtual CPU structure.
3082 */
3083DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3084{
3085 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3087 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3088 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3089 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3090}
3091
3092
3093/**
3094 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3095 *
3096 * @param pVCpu The cross context virtual CPU structure.
3097 */
3098DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3099{
3100 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3103 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3104 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3105}
3106
3107
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u32ErrCode  The error code for the general-protection exception.
 */
DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
{
    /* Valid hardware-exception event; #GP delivers the given error code. */
    uint32_t fIntInfo  = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    fIntInfo          |= RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1);
    fIntInfo          |= RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
    fIntInfo          |= RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP);
    vmxHCSetPendingEvent(pVCpu, fIntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
}


/**
 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u32ErrCode  The error code for the stack exception.
 */
DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
{
    /* Valid hardware-exception event; #SS delivers the given error code. */
    uint32_t fIntInfo  = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    fIntInfo          |= RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1);
    fIntInfo          |= RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
    fIntInfo          |= RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_SS);
    vmxHCSetPendingEvent(pVCpu, fIntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3140
3141
3142/**
3143 * Fixes up attributes for the specified segment register.
3144 *
3145 * @param pVCpu The cross context virtual CPU structure.
3146 * @param pSelReg The segment register that needs fixing.
3147 * @param pszRegName The register name (for logging and assertions).
3148 */
3149static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3150{
3151 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3152
3153 /*
3154 * If VT-x marks the segment as unusable, most other bits remain undefined:
3155 * - For CS the L, D and G bits have meaning.
3156 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3157 * - For the remaining data segments no bits are defined.
3158 *
3159 * The present bit and the unusable bit has been observed to be set at the
3160 * same time (the selector was supposed to be invalid as we started executing
3161 * a V8086 interrupt in ring-0).
3162 *
3163 * What should be important for the rest of the VBox code, is that the P bit is
3164 * cleared. Some of the other VBox code recognizes the unusable bit, but
3165 * AMD-V certainly don't, and REM doesn't really either. So, to be on the
3166 * safe side here, we'll strip off P and other bits we don't care about. If
3167 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3168 *
3169 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3170 */
3171#ifdef VBOX_STRICT
3172 uint32_t const uAttr = pSelReg->Attr.u;
3173#endif
3174
3175 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3176 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3177 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3178
3179#ifdef VBOX_STRICT
3180# ifndef IN_NEM_DARWIN
3181 VMMRZCallRing3Disable(pVCpu);
3182# endif
3183 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3184# ifdef DEBUG_bird
3185 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3186 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3187 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3188# endif
3189# ifndef IN_NEM_DARWIN
3190 VMMRZCallRing3Enable(pVCpu);
3191# endif
3192 NOREF(uAttr);
3193#endif
3194 RT_NOREF2(pVCpu, pszRegName);
3195}
3196
3197
3198/**
3199 * Imports a guest segment register from the current VMCS into the guest-CPU
3200 * context.
3201 *
3202 * @param pVCpu The cross context virtual CPU structure.
3203 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3204 *
3205 * @remarks Called with interrupts and/or preemption disabled.
3206 */
3207template<uint32_t const a_iSegReg>
3208DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3209{
3210 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3211 /* Check that the macros we depend upon here and in the export parenter function works: */
3212#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3213 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3216 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3217 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3218 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3219 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3220 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3221 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3222 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3223
3224 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3225
3226 uint16_t u16Sel;
3227 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3228 pSelReg->Sel = u16Sel;
3229 pSelReg->ValidSel = u16Sel;
3230
3231 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3232 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3233
3234 uint32_t u32Attr;
3235 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3236 pSelReg->Attr.u = u32Attr;
3237 if (u32Attr & X86DESCATTR_UNUSABLE)
3238 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3239
3240 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3241}
3242
3243
3244/**
3245 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3246 *
3247 * @param pVCpu The cross context virtual CPU structure.
3248 *
3249 * @remarks Called with interrupts and/or preemption disabled.
3250 */
3251DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3252{
3253 uint16_t u16Sel;
3254 uint64_t u64Base;
3255 uint32_t u32Limit, u32Attr;
3256 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3257 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3258 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3259 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3260
3261 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3262 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3263 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3264 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3265 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3266 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3267 if (u32Attr & X86DESCATTR_UNUSABLE)
3268 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3269}
3270
3271
3272/**
3273 * Imports the guest TR from the VMCS into the guest-CPU context.
3274 *
3275 * @param pVCpu The cross context virtual CPU structure.
3276 *
3277 * @remarks Called with interrupts and/or preemption disabled.
3278 */
3279DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3280{
3281 uint16_t u16Sel;
3282 uint64_t u64Base;
3283 uint32_t u32Limit, u32Attr;
3284 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3285 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3286 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3287 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3288
3289 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3290 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3291 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3292 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3293 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3294 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3295 /* TR is the only selector that can never be unusable. */
3296 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3297}
3298
3299
3300/**
3301 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3302 *
3303 * @returns The RIP value.
3304 * @param pVCpu The cross context virtual CPU structure.
3305 *
3306 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3307 * @remarks Do -not- call this function directly!
3308 */
3309DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3310{
3311 uint64_t u64Val;
3312 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3313 AssertRC(rc);
3314
3315 pVCpu->cpum.GstCtx.rip = u64Val;
3316
3317 return u64Val;
3318}
3319
3320
3321/**
3322 * Imports the guest RIP from the VMCS into the guest-CPU context.
3323 *
3324 * @param pVCpu The cross context virtual CPU structure.
3325 *
3326 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3327 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3328 * instead!!!
3329 */
3330DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3331{
3332 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3333 {
3334 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3335 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3336 }
3337}
3338
3339
3340/**
3341 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3342 *
3343 * @param pVCpu The cross context virtual CPU structure.
3344 * @param pVmcsInfo The VMCS info. object.
3345 *
3346 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3347 * @remarks Do -not- call this function directly!
3348 */
3349DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3350{
3351 uint64_t fRFlags;
3352 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3353 AssertRC(rc);
3354
3355 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3356 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3357
3358 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3359#ifndef IN_NEM_DARWIN
3360 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3361 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3362 { /* mostly likely */ }
3363 else
3364 {
3365 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3366 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3367 }
3368#else
3369 RT_NOREF(pVmcsInfo);
3370#endif
3371}
3372
3373
3374/**
3375 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3376 *
3377 * @param pVCpu The cross context virtual CPU structure.
3378 * @param pVmcsInfo The VMCS info. object.
3379 *
3380 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3381 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3382 * instead!!!
3383 */
3384DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3385{
3386 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3387 {
3388 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3389 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3390 }
3391}
3392
3393
3394#ifndef IN_NEM_DARWIN
3395/**
3396 * Imports the guest TSX AUX and certain other MSRs from the VMCS into the guest-CPU
3397 * context.
3398 *
3399 * The other MSRs are in the VM-exit MSR-store.
3400 *
3401 * @returns VBox status code.
3402 * @param pVCpu The cross context virtual CPU structure.
3403 * @param pVmcsInfo The VMCS info. object.
3404 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3405 * unexpected errors). Ignored in NEM/darwin context.
3406 */
3407DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3408{
3409 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3410 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3411 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3412 Assert(pMsrs);
3413 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3414 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3415 for (uint32_t i = 0; i < cMsrs; i++)
3416 {
3417 uint32_t const idMsr = pMsrs[i].u32Msr;
3418 switch (idMsr)
3419 {
3420 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3421 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3422 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3423 default:
3424 {
3425 uint32_t idxLbrMsr;
3426 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3427 if (VM_IS_VMX_LBR(pVM))
3428 {
3429 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3430 {
3431 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3432 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3433 break;
3434 }
3435 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3436 {
3437 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3438 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3439 break;
3440 }
3441 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3442 {
3443 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3444 break;
3445 }
3446 /* Fallthru (no break) */
3447 }
3448 pVCpu->cpum.GstCtx.fExtrn = 0;
3449 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3450 ASMSetFlags(fEFlags);
3451 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3452 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3453 }
3454 }
3455 }
3456 return VINF_SUCCESS;
3457}
3458#endif /* !IN_NEM_DARWIN */
3459
3460
/**
 * Imports the guest CR0 from the VMCS into the guest-CPU context.
 *
 * Guest-owned CR0 bits are taken from the VMCS guest CR0 field; host-owned bits
 * (those set in the CR0 guest/host mask) are taken from the CR0 read shadow.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS info. object.
 */
DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
{
    uint64_t u64Cr0;
    uint64_t u64Shadow;
    int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0,            &u64Cr0);    AssertRC(rc);
    rc     = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    /* Combine: guest-owned bits from CR0, host-owned bits from the read shadow. */
    u64Cr0 = (u64Cr0    & ~pVmcsInfo->u64Cr0Mask)
           | (u64Shadow &  pVmcsInfo->u64Cr0Mask);
#else
    if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    {
        u64Cr0 = (u64Cr0    & ~pVmcsInfo->u64Cr0Mask)
               | (u64Shadow &  pVmcsInfo->u64Cr0Mask);
    }
    else
    {
        /*
         * We've merged the guest and nested-guest's CR0 guest/host mask while executing
         * the nested-guest using hardware-assisted VMX. Accordingly we need to
         * re-construct CR0. See @bugref{9180#c95} for details.
         */
        PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
        PVMXVVMCS const     pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
        /* Bits owned by neither hypervisor come from hardware CR0; bits owned by the
           nested hypervisor come from its VMCS; bits owned only by us come from the
           read shadow. */
        u64Cr0 = (u64Cr0                     & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
               | (pVmcsNstGst->u64GuestCr0.u &   pVmcsNstGst->u64Cr0Mask.u)
               | (u64Shadow                  &  (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
        Assert(u64Cr0 & X86_CR0_NE);
    }
#endif

#ifndef IN_NEM_DARWIN
    VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
#endif
    CPUMSetGuestCR0(pVCpu, u64Cr0);
#ifndef IN_NEM_DARWIN
    VMMRZCallRing3Enable(pVCpu);
#endif
}
3506
3507
/**
 * Imports the guest CR3 from the VMCS into the guest-CPU context.
 *
 * Only done when the guest controls CR3 (unrestricted guest, or nested paging
 * with guest paging enabled); otherwise the VMCS holds a shadow CR3 that must
 * not leak into the guest context.  Sets VMCPU_FF_HM_UPDATE_CR3 when the value
 * (or the PAE PDPTEs) changed so PGM can be synced later.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
{
    PVMCC const pVM  = pVCpu->CTX_SUFF(pVM);
    PCPUMCTX    pCtx = &pVCpu->cpum.GstCtx;

    /* CR0.PG bit changes are always intercepted, so it's up to date. */
    if (   VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
        || (   VM_IS_VMX_NESTED_PAGING(pVM)
            && CPUMIsGuestPagingEnabledEx(pCtx)))
    {
        uint64_t u64Cr3;
        int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
        if (pCtx->cr3 != u64Cr3)
        {
            pCtx->cr3 = u64Cr3;
            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
        }

        /*
         * If the guest is in PAE mode, sync back the PDPE's into the guest state.
         * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
         */
        if (CPUMIsGuestInPAEModeEx(pCtx))
        {
            X86PDPE aPaePdpes[4];
            rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
            rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
            rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
            rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
            /* Only copy + flag an update when the PDPTEs actually differ. */
            if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
            {
                memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
                /* PGM now updates PAE PDPTEs while updating CR3. */
                VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
            }
        }
    }
}
3551
3552
3553/**
3554 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3555 *
3556 * @param pVCpu The cross context virtual CPU structure.
3557 * @param pVmcsInfo The VMCS info. object.
3558 */
3559DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3560{
3561 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3562 uint64_t u64Cr4;
3563 uint64_t u64Shadow;
3564 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3565 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3566#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3567 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3568 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3569#else
3570 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3571 {
3572 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3573 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3574 }
3575 else
3576 {
3577 /*
3578 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3579 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3580 * re-construct CR4. See @bugref{9180#c95} for details.
3581 */
3582 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3583 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3584 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3585 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3586 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3587 Assert(u64Cr4 & X86_CR4_VMXE);
3588 }
3589#endif
3590 pCtx->cr4 = u64Cr4;
3591}
3592
3593
/**
 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info. object.
 * @param   fGstIntState    The raw VMCS guest interruptibility-state value.
 */
DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
{
    /*
     * We must import RIP here to set our EM interrupt-inhibited state.
     * We also import RFLAGS as our code that evaluates pending interrupts
     * before VM-entry requires it.
     */
    vmxHCImportGuestRip(pVCpu);
    vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);

    /* Translate the MOV-SS/STI shadow bits into EM's shadow state; the RIP read
       above must happen first as the shadow is tied to the current RIP. */
    CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
                                     RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
                                     RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
                                     pVCpu->cpum.GstCtx.rip);
    /* Mirror NMI blocking as well. */
    CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
}
3614
3615
/**
 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
 * context.
 *
 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS info. object.
 *
 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
 *          do not log!
 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
 *          instead!!!
 */
DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
{
    uint32_t u32Val;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
    /* STI and MOV-SS blocking may never be set simultaneously (Intel SDM guarantee). */
    Assert(    (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
           !=  (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
    if (!u32Val)
    {
        /* Common case: no blocking at all - clear both shadow and NMI inhibition cheaply. */
        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
        CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
    }
    else
        /* Out-of-line slow path; may pull in RIP and RFLAGS. */
        vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
}
3644
3645
/**
 * Worker for VMXR0ImportStateOnDemand.
 *
 * Reads the requested subset of guest state out of the current VMCS into
 * pVCpu->cpum.GstCtx and clears the corresponding CPUMCTX_EXTRN_XXX bits.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS info. object.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */
static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
{
    int      rc   = VINF_SUCCESS;
    PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    uint32_t u32Val;

    /*
     * Note! This is hack to workaround a mysterious BSOD observed with release builds
     *       on Windows 10 64-bit hosts. Profile and debug builds are not affected and
     *       neither are other host platforms.
     *
     *       Committing this temporarily as it prevents BSOD.
     *
     *       Update: This is very likely a compiler optimization bug, see @bugref{9180}.
     */
#ifdef RT_OS_WINDOWS
    if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
        return VERR_HM_IPE_1;
#endif

    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);

#ifndef IN_NEM_DARWIN
    /*
     * We disable interrupts to make the updating of the state and in particular
     * the fExtrn modification atomic wrt to preemption hooks.
     */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
#endif

    /* Only fetch what is still marked external; the do/while(0) allows breaking
       out on the first error while keeping a single exit path. */
    fWhat &= pCtx->fExtrn;
    if (fWhat)
    {
        do
        {
            if (fWhat & CPUMCTX_EXTRN_RIP)
                vmxHCImportGuestRip(pVCpu);

            if (fWhat & CPUMCTX_EXTRN_RFLAGS)
                vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);

            /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
            if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
                vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);

            if (fWhat & CPUMCTX_EXTRN_RSP)
            {
                rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
                AssertRC(rc);
            }

            if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
            {
                PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
#ifndef IN_NEM_DARWIN
                bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
#else
                bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
#endif
                /* For each selector: read the VMCS copy, then (in real-on-v86 mode)
                   override the access rights with the stashed real-mode attributes. */
                if (fWhat & CPUMCTX_EXTRN_CS)
                {
                    vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
                    vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
                    if (fRealOnV86Active)
                        pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
                    EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
                }
                if (fWhat & CPUMCTX_EXTRN_SS)
                {
                    vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
                    if (fRealOnV86Active)
                        pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
                }
                if (fWhat & CPUMCTX_EXTRN_DS)
                {
                    vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
                    if (fRealOnV86Active)
                        pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
                }
                if (fWhat & CPUMCTX_EXTRN_ES)
                {
                    vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
                    if (fRealOnV86Active)
                        pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
                }
                if (fWhat & CPUMCTX_EXTRN_FS)
                {
                    vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
                    if (fRealOnV86Active)
                        pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
                }
                if (fWhat & CPUMCTX_EXTRN_GS)
                {
                    vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
                    if (fRealOnV86Active)
                        pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
                }
            }

            if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
            {
                if (fWhat & CPUMCTX_EXTRN_LDTR)
                    vmxHCImportGuestLdtr(pVCpu);

                if (fWhat & CPUMCTX_EXTRN_GDTR)
                {
                    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt);  AssertRC(rc);
                    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);        AssertRC(rc);
                    pCtx->gdtr.cbGdt = u32Val;
                }

                /* Guest IDTR. */
                if (fWhat & CPUMCTX_EXTRN_IDTR)
                {
                    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt);  AssertRC(rc);
                    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);        AssertRC(rc);
                    pCtx->idtr.cbIdt = u32Val;
                }

                /* Guest TR. */
                if (fWhat & CPUMCTX_EXTRN_TR)
                {
#ifndef IN_NEM_DARWIN
                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
                       don't need to import that one. */
                    if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
#endif
                        vmxHCImportGuestTr(pVCpu);
                }
            }

            if (fWhat & CPUMCTX_EXTRN_DR7)
            {
#ifndef IN_NEM_DARWIN
                /* When the hypervisor's DR7 is loaded, the guest DR7 lives elsewhere. */
                if (!pVCpu->hmr0.s.fUsingHyperDR7)
#endif
                {
                    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
                    AssertRC(rc);
                }
            }

            if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
            {
                rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
                rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
                rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);            AssertRC(rc);
                pCtx->SysEnter.cs = u32Val;
            }

#ifndef IN_NEM_DARWIN
            /* Lazily loaded MSRs: only valid to read from hardware while the guest
               values are actually loaded. */
            if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
            {
                if (   pVM->hmr0.s.fAllow64BitGuests
                    && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
                    pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
            }

            if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
            {
                if (   pVM->hmr0.s.fAllow64BitGuests
                    && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
                {
                    pCtx->msrLSTAR  = ASMRdMsr(MSR_K8_LSTAR);
                    pCtx->msrSTAR   = ASMRdMsr(MSR_K6_STAR);
                    pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
                }
            }

            if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
            {
                rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
                AssertRCReturn(rc, rc);
            }
#else
            NOREF(pVM);
#endif

            if (fWhat & CPUMCTX_EXTRN_CR_MASK)
            {
                if (fWhat & CPUMCTX_EXTRN_CR0)
                    vmxHCImportGuestCr0(pVCpu, pVmcsInfo);

                if (fWhat & CPUMCTX_EXTRN_CR4)
                    vmxHCImportGuestCr4(pVCpu, pVmcsInfo);

                /* CR3 last: it consults CR0/CR4-derived paging state. */
                if (fWhat & CPUMCTX_EXTRN_CR3)
                    vmxHCImportGuestCr3(pVCpu);
            }

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
            if (fWhat & CPUMCTX_EXTRN_HWVIRT)
            {
                /* With VMCS shadowing the nested-guest VMCS must be refreshed from the
                   shadow VMCS, but only while in VMX root mode. */
                if (   (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
                    && !CPUMIsGuestInVmxNonRootMode(pCtx))
                {
                    Assert(CPUMIsGuestInVmxRootMode(pCtx));
                    rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
                    if (RT_SUCCESS(rc))
                    { /* likely */ }
                    else
                        break;
                }
            }
#endif
        } while (0);

        if (RT_SUCCESS(rc))
        {
            /* Update fExtrn. */
            pCtx->fExtrn &= ~fWhat;

            /* If everything has been imported, clear the HM keeper bit. */
            if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
            {
#ifndef IN_NEM_DARWIN
                pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
#else
                pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
#endif
                Assert(!pCtx->fExtrn);
            }
        }
    }
#ifndef IN_NEM_DARWIN
    else
        AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));

    /*
     * Restore interrupts.
     */
    ASMSetFlags(fEFlags);
#endif

    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);

    if (RT_SUCCESS(rc))
    { /* likely */ }
    else
        return rc;

    /*
     * Honor any pending CR3 updates.
     *
     * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
     * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
     * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
     *
     * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
     * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
     * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
     * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
     *
     * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
     *
     * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
     */
    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
#ifndef IN_NEM_DARWIN
        && VMMRZCallRing3IsEnabled(pVCpu)
#endif
       )
    {
        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    }

    return VINF_SUCCESS;
}
3925
3926
3927/**
3928 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3929 *
3930 * @returns VBox status code.
3931 * @param pVCpu The cross context virtual CPU structure.
3932 * @param pVmcsInfo The VMCS info. object.
3933 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3934 * in NEM/darwin context.
3935 * @tparam a_fWhat What to import, zero or more bits from
3936 * HMVMX_CPUMCTX_EXTRN_ALL.
3937 */
3938template<uint64_t const a_fWhat>
3939static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3940{
3941 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3942 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3943 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3944 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3945
3946 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3947
3948 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3949
3950 /* RIP and RFLAGS may have been imported already by the post exit code
3951 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so this part
3952 of the code is skipping this part of the code. */
3953 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3954 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3955 {
3956 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3957 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3958
3959 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3960 {
3961 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3962 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3963 else
3964 vmxHCImportGuestCoreRip(pVCpu);
3965 }
3966 }
3967
3968 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3969 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3970 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3971
3972 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3973 {
3974 if (a_fWhat & CPUMCTX_EXTRN_CS)
3975 {
3976 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3977 /** @todo try get rid of this carp, it smells and is probably never ever
3978 * used: */
3979 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3980 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3981 {
3982 vmxHCImportGuestCoreRip(pVCpu);
3983 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3984 }
3985 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3986 }
3987 if (a_fWhat & CPUMCTX_EXTRN_SS)
3988 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3989 if (a_fWhat & CPUMCTX_EXTRN_DS)
3990 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3991 if (a_fWhat & CPUMCTX_EXTRN_ES)
3992 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3993 if (a_fWhat & CPUMCTX_EXTRN_FS)
3994 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3995 if (a_fWhat & CPUMCTX_EXTRN_GS)
3996 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3997
3998 /* Guest TR.
3999 Real-mode emulation using virtual-8086 mode has the fake TSS
4000 (pRealModeTSS) in TR, don't need to import that one. */
4001#ifndef IN_NEM_DARWIN
4002 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4003 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4004 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4005#else
4006 if (a_fWhat & CPUMCTX_EXTRN_TR)
4007#endif
4008 vmxHCImportGuestTr(pVCpu);
4009
4010#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4011 if (fRealOnV86Active)
4012 {
4013 if (a_fWhat & CPUMCTX_EXTRN_CS)
4014 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4015 if (a_fWhat & CPUMCTX_EXTRN_SS)
4016 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4017 if (a_fWhat & CPUMCTX_EXTRN_DS)
4018 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4019 if (a_fWhat & CPUMCTX_EXTRN_ES)
4020 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4021 if (a_fWhat & CPUMCTX_EXTRN_FS)
4022 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4023 if (a_fWhat & CPUMCTX_EXTRN_GS)
4024 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4025 }
4026#endif
4027 }
4028
4029 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4030 {
4031 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4032 AssertRC(rc);
4033 }
4034
4035 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4036 vmxHCImportGuestLdtr(pVCpu);
4037
4038 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4039 {
4040 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4041 uint32_t u32Val;
4042 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4043 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4044 }
4045
4046 /* Guest IDTR. */
4047 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4048 {
4049 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4050 uint32_t u32Val;
4051 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4052 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint64_t)u32Val;
4053 }
4054
4055 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4056 {
4057#ifndef IN_NEM_DARWIN
4058 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4059#endif
4060 {
4061 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4062 AssertRC(rc);
4063 }
4064 }
4065
4066 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4067 {
4068 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4069 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4070 uint32_t u32Val;
4071 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4072 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4073 }
4074
4075#ifndef IN_NEM_DARWIN
4076 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4077 {
4078 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4079 && pVM->hmr0.s.fAllow64BitGuests)
4080 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4081 }
4082
4083 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4084 {
4085 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4086 && pVM->hmr0.s.fAllow64BitGuests)
4087 {
4088 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4089 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4090 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4091 }
4092 }
4093
4094 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4095 {
4096 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4097 AssertRCReturn(rc1, rc1);
4098 }
4099#else
4100 NOREF(pVM);
4101#endif
4102
4103 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4104 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4105
4106 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4107 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4108
4109 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4110 vmxHCImportGuestCr3(pVCpu);
4111
4112#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4113 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4114 {
4115 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4116 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4117 {
4118 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4119 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4120 AssertRCReturn(rc, rc);
4121 }
4122 }
4123#endif
4124
4125 /* Update fExtrn. */
4126 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4127
4128 /* If everything has been imported, clear the HM keeper bit. */
4129 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4130 {
4131#ifndef IN_NEM_DARWIN
4132 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4133#else
4134 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4135#endif
4136 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4137 }
4138
4139 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4140
4141 /*
4142 * Honor any pending CR3 updates.
4143 *
4144 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4145 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4146 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4147 *
4148 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4149 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4150 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4151 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4152 *
4153 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4154 *
4155 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4156 */
4157#ifndef IN_NEM_DARWIN
4158 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4159 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4160 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4161 return VINF_SUCCESS;
4162 ASMSetFlags(fEFlags);
4163#else
4164 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4165 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4166 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4167 return VINF_SUCCESS;
4168 RT_NOREF_PV(fEFlags);
4169#endif
4170
4171 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4172 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4173 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4174 return VINF_SUCCESS;
4175}
4176
4177
/**
 * Internal state fetcher.
 *
 * Fast-path wrapper around vmxHCImportGuestStateInner: when exactly the
 * compile-time requested subset is still marked external in fExtrn, the
 * (probably inlined) specialized inner template is used; otherwise it falls
 * back to the generic runtime importer.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcsInfo       The VMCS info. object.
 * @param   pszCaller       For logging.
 * @tparam  a_fWhat         What needs to be imported, CPUMCTX_EXTRN_XXX.
 * @tparam  a_fDoneLocal    What's ASSUMED to have been retrieved locally
 *                          already.  This is ORed together with @a a_fWhat when
 *                          calculating what needs fetching (just for safety).
 * @tparam  a_fDonePostExit What's ASSUMED to have been retrieved by
 *                          hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
 *                          already.  This is ORed together with @a a_fWhat when
 *                          calculating what needs fetching (just for safety).
 */
template<uint64_t const a_fWhat,
         uint64_t const a_fDoneLocal = 0,
         uint64_t const a_fDonePostExit = 0
#ifndef IN_NEM_DARWIN
                        /* Ring-0 builds treat the interrupt inhibition state as already fetched post-exit. */
                        | CPUMCTX_EXTRN_INHIBIT_INT
                        | CPUMCTX_EXTRN_INHIBIT_NMI
# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
                        | HMVMX_CPUMCTX_EXTRN_ALL
# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
                        | CPUMCTX_EXTRN_RFLAGS
# endif
#else  /* IN_NEM_DARWIN */
                        | CPUMCTX_EXTRN_ALL /** @todo optimize */
#endif /* IN_NEM_DARWIN */
        >
DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
{
    RT_NOREF_PV(pszCaller);
    /* Only bother when something within the HM-managed subset is involved at all. */
    if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
    {
#ifndef IN_NEM_DARWIN
        /*
         * We disable interrupts to make the updating of the state and in particular
         * the fExtrn modification atomic wrt to preemption hooks.
         */
        RTCCUINTREG const fEFlags = ASMIntDisableFlags();
#else
        RTCCUINTREG const fEFlags = 0;
#endif

        /*
         * We combine all three parameters and take the (probably) inlined optimized
         * code path for the new things specified in a_fWhat.
         *
         * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
         * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
         * also take the streamlined path when both of these are cleared in fExtrn
         * already.  vmxHCImportGuestStateInner checks fExtrn before fetching.  This
         * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
         */
        uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
                                 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
        if (RT_LIKELY(   (   fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
                          || fWhatToDo == (  a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
                                           & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
                      && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
        {
            /* Specialized (inlined) path: only the compile-time requested bits are fetched. */
            int const rc = vmxHCImportGuestStateInner<  a_fWhat
                                                      & HMVMX_CPUMCTX_EXTRN_ALL
                                                      & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
#ifndef IN_NEM_DARWIN
            ASMSetFlags(fEFlags);
#endif
            return rc;
        }

#ifndef IN_NEM_DARWIN
        ASMSetFlags(fEFlags);
#endif

        /*
         * We shouldn't normally get here, but it may happen when executing
         * in the debug run-loops.  Typically, everything should already have
         * been fetched then.  Otherwise call the fallback state import function.
         */
        if (fWhatToDo == 0)
        { /* hope the cause was the debug loop or something similar */ }
        else
        {
            STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
            Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
                       a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
                       a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
            return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
        }
    }
    return VINF_SUCCESS;
}
4272
4273
/**
 * Check per-VM and per-VCPU force flag actions that require us to go back to
 * ring-3 for one reason or another.
 *
 * The order of the checks below is deliberate: cheap/likely checks first, and
 * for nested guests the event priority mandated by the Intel spec.
 *
 * @returns Strict VBox status code (i.e. informational status codes too)
 * @retval VINF_SUCCESS if we don't have any actions that require going back to
 *         ring-3.
 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
 *         interrupts)
 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
 *         all EMTs to be in ring-3.
 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
 *         to the EM loop.
 *
 * @param pVCpu         The cross context virtual CPU structure.
 * @param fIsNestedGuest    Flag whether this is for a pending nested guest event.
 * @param fStepping     Whether we are single-stepping the guest using the
 *                      hypervisor debugger.
 *
 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
 *          is no longer in VMX non-root mode.
 */
static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
{
#ifndef IN_NEM_DARWIN
    Assert(VMMRZCallRing3IsEnabled(pVCpu));
#endif

    /*
     * Update pending interrupts into the APIC's IRR.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
        APICUpdatePendingInterrupts(pVCpu);

    /*
     * Anything pending?  Should be more likely than not if we're doing a good job.
     * Note: when stepping, a wider force-flag mask is checked.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (  !fStepping
        ?    !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
          && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
        :    !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
          && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
        return VINF_SUCCESS;

    /* Pending PGM CR3 sync. */
    if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /* CR0/CR3/CR4 must already have been imported for PGMSyncCR3 to work with. */
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
        VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
                                           VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (rcStrict != VINF_SUCCESS)
        {
            AssertRC(VBOXSTRICTRC_VAL(rcStrict));
            Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }

    /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
        || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
        int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
        Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
                  rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
        return rc;
    }

    /* Pending VM request packets, such as hardware interrupts. */
    if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
        Log4Func(("Pending VM request forcing us back to ring-3\n"));
        return VINF_EM_PENDING_REQUEST;
    }

    /* Pending PGM pool flushes. */
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
        Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
        return VINF_PGM_POOL_FLUSH_PENDING;
    }

    /* Pending DMA requests. */
    if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
    {
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
        Log4Func(("Pending DMA request forcing us back to ring-3\n"));
        return VINF_EM_RAW_TO_R3;
    }

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /*
     * Pending nested-guest events.
     *
     * Please note the priority of these events are specified and important.
     * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
     * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
     *
     * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
     * handled here. They'll be handled by the hardware while executing the nested-guest
     * or by us when we injecting events that are not part of VM-entry of the nested-guest.
     */
    if (fIsNestedGuest)
    {
        /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
        {
            Log4Func(("Pending nested-guest APIC-write\n"));
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
            /* Only bail if the write caused a VM-exit (guest left non-root mode) or an error. */
            if (    rcStrict == VINF_SUCCESS
                && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
                return rcStrict;
        }

        /* Pending nested-guest monitor-trap flag (MTF). */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
        {
            Log4Func(("Pending nested-guest MTF\n"));
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
            return rcStrict;
        }

        /* Pending nested-guest VMX-preemption timer expired. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
        {
            Log4Func(("Pending nested-guest preempt timer\n"));
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
            Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
            return rcStrict;
        }
    }
#else
    NOREF(fIsNestedGuest);
#endif

    return VINF_SUCCESS;
}
4421
4422
4423/**
4424 * Converts any TRPM trap into a pending HM event. This is typically used when
4425 * entering from ring-3 (not longjmp returns).
4426 *
4427 * @param pVCpu The cross context virtual CPU structure.
4428 */
4429static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4430{
4431 Assert(TRPMHasTrap(pVCpu));
4432 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4433
4434 uint8_t uVector;
4435 TRPMEVENT enmTrpmEvent;
4436 uint32_t uErrCode;
4437 RTGCUINTPTR GCPtrFaultAddress;
4438 uint8_t cbInstr;
4439 bool fIcebp;
4440
4441 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4442 AssertRC(rc);
4443
4444 uint32_t u32IntInfo;
4445 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4446 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4447
4448 rc = TRPMResetTrap(pVCpu);
4449 AssertRC(rc);
4450 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4451 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4452
4453 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4454}
4455
4456
/**
 * Converts the pending HM event into a TRPM trap.
 *
 * Counterpart of vmxHCTrpmTrapToPendingEvent(); clears the pending-event flag
 * once the hand-over to TRPM is complete.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
{
    Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);

    /* If a trap was already pending, we did something wrong! */
    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);

    /* Translate the VMX interruption-information into TRPM's vector/type pair. */
    uint32_t const  u32IntInfo  = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
    uint32_t const  uVector     = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
    TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);

    Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));

    int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
    AssertRC(rc);

    if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
        TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);

    if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
        TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
    else
    {
        uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
        switch (uVectorType)
        {
            case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
                /* ICEBP (INT1) needs extra marking in TRPM and, like the software
                   cases below, the instruction length (deliberate fallthrough). */
                TRPMSetTrapDueToIcebp(pVCpu);
                RT_FALL_THRU();
            case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
            case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
            {
                AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
                          || (   uVector == X86_XCPT_BP /* INT3 */
                              || uVector == X86_XCPT_OF /* INTO */
                              || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
                          ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
                TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
                break;
            }
        }
    }

    /* We're now done converting the pending event. */
    VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
}
4508
4509
4510/**
4511 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4512 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4513 *
4514 * @param pVCpu The cross context virtual CPU structure.
4515 * @param pVmcsInfo The VMCS info. object.
4516 */
4517static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4518{
4519 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4520 {
4521 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4522 {
4523 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4524 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4525 AssertRC(rc);
4526 }
4527 Log4Func(("Enabled interrupt-window exiting\n"));
4528 } /* else we will deliver interrupts whenever the guest Vm-exits next and is in a state to receive the interrupt. */
4529}
4530
4531
4532/**
4533 * Clears the interrupt-window exiting control in the VMCS.
4534 *
4535 * @param pVCpu The cross context virtual CPU structure.
4536 * @param pVmcsInfo The VMCS info. object.
4537 */
4538DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4539{
4540 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4541 {
4542 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4543 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4544 AssertRC(rc);
4545 Log4Func(("Disabled interrupt-window exiting\n"));
4546 }
4547}
4548
4549
4550/**
4551 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4552 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4553 *
4554 * @param pVCpu The cross context virtual CPU structure.
4555 * @param pVmcsInfo The VMCS info. object.
4556 */
4557static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4558{
4559 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4560 {
4561 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4562 {
4563 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4564 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4565 AssertRC(rc);
4566 Log4Func(("Enabled NMI-window exiting\n"));
4567 }
4568 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4569}
4570
4571
4572/**
4573 * Clears the NMI-window exiting control in the VMCS.
4574 *
4575 * @param pVCpu The cross context virtual CPU structure.
4576 * @param pVmcsInfo The VMCS info. object.
4577 */
4578DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4579{
4580 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4581 {
4582 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4583 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4584 AssertRC(rc);
4585 Log4Func(("Disabled NMI-window exiting\n"));
4586 }
4587}
4588
4589
/**
 * Injects an event into the guest upon VM-entry by updating the relevant fields
 * in the VM-entry area in the VMCS.
 *
 * For real-mode guests without unrestricted guest execution, the event is
 * instead dispatched manually (IVT lookup, stack frame, CS:IP update) since
 * hardware event injection is not possible in that configuration.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
 *
 * @param pVCpu             The cross context virtual CPU structure.
 * @param pVmcsInfo         The VMCS info object.
 * @param fIsNestedGuest    Flag whether this is for a pending nested guest event.
 * @param pEvent            The event being injected.
 * @param pfIntrState       Pointer to the VT-x guest-interruptibility-state.  This
 *                          will be updated if necessary.  This must not be NULL.
 * @param fStepping         Whether we're single-stepping guest execution and should
 *                          return VINF_EM_DBG_STEPPED if the event is injected
 *                          directly (registers modified by us, not by hardware on
 *                          VM-entry).
 */
static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
                                         bool fStepping, uint32_t *pfIntrState)
{
    /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
    AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
    Assert(pfIntrState);

#ifdef IN_NEM_DARWIN
    RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
#endif

    /* Unpack the event for convenient access below. */
    PCPUMCTX          pCtx       = &pVCpu->cpum.GstCtx;
    uint32_t          u32IntInfo = pEvent->u64IntInfo;
    uint32_t const    u32ErrCode = pEvent->u32ErrCode;
    uint32_t const    cbInstr    = pEvent->cbInstr;
    RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
    uint8_t const     uVector    = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
    uint32_t const    uIntType   = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);

#ifdef VBOX_STRICT
    /*
     * Validate the error-code-valid bit for hardware exceptions.
     * No error codes for exceptions in real-mode.
     *
     * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
     */
    if (   uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
        && !CPUMIsGuestInRealModeEx(pCtx))
    {
        switch (uVector)
        {
            case X86_XCPT_PF:
            case X86_XCPT_DF:
            case X86_XCPT_TS:
            case X86_XCPT_NP:
            case X86_XCPT_SS:
            case X86_XCPT_GP:
            case X86_XCPT_AC:
                AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
                          ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
                RT_FALL_THRU();
            default:
                break;
        }
    }

    /* Cannot inject an NMI when block-by-MOV SS is in effect. */
    Assert(   uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
           || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
#endif

    /* Update injection statistics (exceptions vs. interrupts). */
    RT_NOREF(uVector);
    if (   uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
        || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
        || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
        || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
    {
        Assert(uVector <= X86_XCPT_LAST);
        Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI          || uVector == X86_XCPT_NMI);
        Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
    }
    else
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);

    /*
     * Hardware interrupts & exceptions cannot be delivered through the software interrupt
     * redirection bitmap to the real mode task in virtual-8086 mode.  We must jump to the
     * interrupt handler in the (real-mode) guest.
     *
     * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
     * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
     */
    if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
    {
#ifndef IN_NEM_DARWIN
        if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
#endif
        {
            /*
             * For CPUs with unrestricted guest execution enabled and with the guest
             * in real-mode, we must not set the deliver-error-code bit.
             *
             * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
             */
            u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
        }
#ifndef IN_NEM_DARWIN
        else
        {
            /* No unrestricted guest execution: emulate real-mode event delivery manually. */
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            Assert(PDMVmmDevHeapIsEnabled(pVM));
            Assert(pVM->hm.s.vmx.pRealModeTSS);
            Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));

            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
            int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
                                                              | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
            AssertRCReturn(rc2, rc2);

            /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
            size_t const cbIdtEntry = sizeof(X86IDTR16);
            if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
            {
                /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
                if (uVector == X86_XCPT_DF)
                    return VINF_EM_RESET;

                /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
                   No error codes for exceptions in real-mode.  Recurses into this function. */
                if (uVector == X86_XCPT_GP)
                {
                    static HMEVENT const s_EventXcptDf
                        = HMEVENT_INIT_ONLY_INT_INFO(  RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
                                                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                                                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
                                                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1));
                    return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
                }

                /*
                 * If we're injecting an event with no valid IDT entry, inject a #GP.
                 * No error codes for exceptions in real-mode.
                 *
                 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
                 */
                static HMEVENT const s_EventXcptGp
                    = HMEVENT_INIT_ONLY_INT_INFO(  RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
                                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
                                                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1));
                return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
            }

            /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
            uint16_t uGuestIp = pCtx->ip;
            if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
            {
                Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
                /* #BP and #OF are both benign traps, we need to resume the next instruction. */
                uGuestIp = pCtx->ip + (uint16_t)cbInstr;
            }
            else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
                uGuestIp = pCtx->ip + (uint16_t)cbInstr;

            /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
            X86IDTR16 IdtEntry;
            RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
            rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
            AssertRCReturn(rc2, rc2);

            /* Construct the stack frame for the interrupt/exception handler: FLAGS, CS, IP. */
            VBOXSTRICTRC rcStrict;
            rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
                if (rcStrict == VINF_SUCCESS)
                    rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
            }

            /* Clear the required eflag bits and jump to the interrupt/exception handler. */
            if (rcStrict == VINF_SUCCESS)
            {
                pCtx->eflags.u   &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
                pCtx->rip         = IdtEntry.offSel;
                pCtx->cs.Sel      = IdtEntry.uSel;
                pCtx->cs.ValidSel = IdtEntry.uSel;
                pCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
                if (   uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
                    && uVector == X86_XCPT_PF)
                    pCtx->cr2 = GCPtrFault;

                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS  | HM_CHANGED_GUEST_CR2
                                                                    | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                                                    | HM_CHANGED_GUEST_RSP);

                /*
                 * If we delivered a hardware exception (other than an NMI) and if there was
                 * block-by-STI in effect, we should clear it.
                 */
                if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
                {
                    Assert(   uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
                           && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
                    Log4Func(("Clearing inhibition due to STI\n"));
                    *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
                }

                Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
                      u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));

                /*
                 * The event has been truly dispatched to the guest.  Mark it as no longer pending so
                 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
                 */
                VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;

                /*
                 * If we eventually support nested-guest execution without unrestricted guest execution,
                 * we should set fInterceptEvents here.
                 */
                Assert(!fIsNestedGuest);

                /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
                if (fStepping)
                    rcStrict = VINF_EM_DBG_STEPPED;
            }
            AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
                      ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
#else
        RT_NOREF(pVmcsInfo);
#endif
    }

    /*
     * Validate.
     */
    Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo));                     /* Bit 31 (Valid bit) must be set by caller. */
    Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK));       /* Bits 30:12 MBZ. */

    /*
     * Inject the event into the VMCS.
     */
    int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
    if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
        rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
    rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
    AssertRC(rc);

    /*
     * Update guest CR2 if this is a page-fault.
     */
    if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
        pCtx->cr2 = GCPtrFault;

    Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
    return VINF_SUCCESS;
}
4850
4851
/**
 * Evaluates the event to be delivered to the guest and sets it as the pending
 * event.
 *
 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
 * exits to ring-3 before executing guest code, see vmxHCExitToRing3().  We must
 * NOT restore these force-flags.
 *
 * Event priority here: NMIs are considered before external interrupts; when an
 * event cannot be delivered immediately, the matching NMI-/interrupt-window
 * exiting control is armed instead so we get a VM-exit once the guest can
 * accept it.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS information structure.
 */
static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    Assert(!TRPMHasTrap(pVCpu));

    /*
     * Evaluate if a new event needs to be injected.
     * An event that's already pending has already performed all necessary checks.
     */
    if (   !VCPU_2_VMXSTATE(pVCpu).Event.fPending
        && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    {
        /** @todo SMI. SMIs take priority over NMIs. */

        /*
         * NMIs.
         * NMIs take priority over external interrupts.
         */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
        {
            if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
            {
                /* Finally, inject the NMI and we're done. */
                vmxHCSetPendingXcptNmi(pVCpu);
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
                vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
                return VINF_SUCCESS;
            }

            /*
             * Setup NMI-window exiting and also clear any interrupt-window exiting that might
             * still be active.  This can happen if we got VM-exits that were higher priority
             * than an interrupt-window VM-exit.
             */
            vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
            vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
        }
        else
            Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));

        /*
         * External interrupts (PIC/APIC).
         */
        if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
            && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
        {
            Assert(!DBGFIsStepping(pVCpu));
            /* RFLAGS.IF gates external interrupt delivery, so it must be up to date. */
            int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
            AssertRC(rc);

            if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
            {
                /*
                 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
                 * We cannot re-request the interrupt from the controller again.
                 */
                uint8_t u8Interrupt;
                rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
                if (RT_SUCCESS(rc))
                    vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
                {
                    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
                    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
                        vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
                    /*
                     * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
                     * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
                     * need to re-set this force-flag here.
                     */
                }
                else
                    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);

                /* We must clear interrupt-window exiting for the same reason mentioned above for NMIs. */
                vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
                return VINF_SUCCESS;
            }

            /* Setup interrupt-window exiting. */
            vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
            Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
        }
        else
        {
            vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
            Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
        }
    }
    else
    {
        /*
         * An event is being injected or we are in an interrupt shadow.
         * If another event is pending currently, instruct VT-x to cause a VM-exit as
         * soon as the guest is ready to accept it.
         */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
            vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
        else
        {
            Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
            if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
                && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
                vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
            else
                Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT));
        }
    }

    return VINF_SUCCESS;
}
4974
4975
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Evaluates the event to be delivered to the nested-guest and sets it as the
 * pending event.
 *
 * Events are evaluated in strict priority order: SMI (todo), NMI-window exit,
 * NMI, interrupt-window exit, external interrupt. The first cause that fires
 * either produces a nested-guest VM-exit (via IEM) or records a pending event.
 *
 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
 * NOT restore these force-flags.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcsInfo   The VMCS information structure.
 *
 * @remarks The guest must be in VMX non-root mode.
 */
static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    Assert(!TRPMHasTrap(pVCpu));

    /*
     * If we are injecting an event, all necessary checks have been performed.
     * Any interrupt-window or NMI-window exiting would have been setup by the
     * nested-guest while we merged controls.
     */
    if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
        return VINF_SUCCESS;

    /*
     * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
     * made pending (TRPM to HM event) and would be handled above if we resumed
     * execution in HM. If somehow we fell back to emulation after the
     * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
     * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
     * intercepts should be active and any events pending here have been generated
     * while executing the guest in VMX non-root mode after virtual VM-entry completed.
     */
    Assert(CPUMIsGuestVmxInterceptEvents(pCtx));

    /*
     * Interrupt shadows MAY block NMIs.
     * They also block external-interrupts and MAY block external-interrupt VM-exits.
     *
     * See Intel spec. 24.4.2 "Guest Non-Register State".
     * See Intel spec. 25.4.1 "Event Blocking".
     */
    if (!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    { /* likely */ }
    else
        return VINF_SUCCESS;

    /** @todo SMI. SMIs take priority over NMIs. */

    /*
     * NMIs.
     * NMIs take priority over interrupts.
     */
    if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    {
        /*
         * Nested-guest NMI-window exiting.
         * The NMI-window exit must happen regardless of whether an NMI is pending
         * provided virtual-NMI blocking is not in effect.
         *
         * See Intel spec. 25.2 "Other Causes Of VM Exits".
         */
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
            && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
        {
            Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
            return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
        }

        /*
         * For a nested-guest, the FF always indicates the outer guest's ability to
         * receive an NMI while the guest-interruptibility state bit depends on whether
         * the nested-hypervisor is using virtual-NMIs.
         *
         * It is very important that we also clear the force-flag if we are causing
         * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
         * with re-injecting or discarding the NMI. This fixes the bug that showed up
         * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
         */
        if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
        {
            /* If the nested-hypervisor intercepts NMIs, cause an NMI VM-exit; else
               deliver the NMI directly to the nested-guest as a pending event. */
            if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
                return IEMExecVmxVmexitXcptNmi(pVCpu);
            vmxHCSetPendingXcptNmi(pVCpu);
            return VINF_SUCCESS;
        }
    }

    /*
     * Nested-guest interrupt-window exiting.
     *
     * We must cause the interrupt-window exit regardless of whether an interrupt is pending
     * provided virtual interrupts are enabled.
     *
     * See Intel spec. 25.2 "Other Causes Of VM Exits".
     * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
     */
    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
        && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
    {
        Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
        return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    }

    /*
     * External interrupts (PIC/APIC).
     *
     * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
     * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
     * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
     * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
     *
     * NMIs block external interrupts as they are dispatched through the interrupt gate (vector 2)
     * which automatically clears EFLAGS.IF. Also it's possible an NMI handler could enable interrupts
     * and thus we should not check for NMI inhibition here.
     *
     * See Intel spec. 25.4.1 "Event Blocking".
     * See Intel spec. 6.8.1 "Masking Maskable Hardware Interrupts".
     */
    if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
        && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    {
        Assert(!DBGFIsStepping(pVCpu));
        /* RFLAGS is needed both for the physical-interrupt check and for delivery. */
        int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
        AssertRC(rc);
        if (CPUMIsGuestVmxPhysIntrEnabled(pCtx))
        {
            /* Nested-guest external interrupt VM-exit. */
            if (    CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
                && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
            {
                /* Without "acknowledge interrupt on exit" the vector is not consumed
                   here; the VM-exit is signalled with the interrupt still pending. */
                VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
                Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
                return rcStrict;
            }

            /*
             * Fetch the external interrupt from the interrupt controller.
             * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
             * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
             */
            uint8_t u8Interrupt;
            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            if (RT_SUCCESS(rc))
            {
                /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
                if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
                {
                    Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
                    VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
                    Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
                    return rcStrict;
                }
                /* No intercept: deliver the interrupt directly to the nested-guest. */
                vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
                return VINF_SUCCESS;
            }
        }
    }
    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5143
5144
5145/**
5146 * Injects any pending events into the guest if the guest is in a state to
5147 * receive them.
5148 *
5149 * @returns Strict VBox status code (i.e. informational status codes too).
5150 * @param pVCpu The cross context virtual CPU structure.
5151 * @param pVmcsInfo The VMCS information structure.
5152 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5153 * @param fIntrState The VT-x guest-interruptibility state.
5154 * @param fStepping Whether we are single-stepping the guest using the
5155 * hypervisor debugger and should return
5156 * VINF_EM_DBG_STEPPED if the event was dispatched
5157 * directly.
5158 */
5159static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5160 uint32_t fIntrState, bool fStepping)
5161{
5162 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5163#ifndef IN_NEM_DARWIN
5164 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5165#endif
5166
5167#ifdef VBOX_STRICT
5168 /*
5169 * Verify guest-interruptibility state.
5170 *
5171 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5172 * since injecting an event may modify the interruptibility state and we must thus always
5173 * use fIntrState.
5174 */
5175 {
5176 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5177 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5178 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5179 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5180 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5181 Assert(!TRPMHasTrap(pVCpu));
5182 NOREF(fBlockMovSS); NOREF(fBlockSti);
5183 }
5184#endif
5185
5186 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5187 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5188 {
5189 /*
5190 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5191 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5192 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5193 *
5194 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5195 */
5196 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5197#ifdef VBOX_STRICT
5198 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5199 {
5200 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5201 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5202 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5203 }
5204 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5205 {
5206 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5207 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5208 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5209 }
5210#endif
5211 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5212 uIntType));
5213
5214 /*
5215 * Inject the event and get any changes to the guest-interruptibility state.
5216 *
5217 * The guest-interruptibility state may need to be updated if we inject the event
5218 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5219 */
5220 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5221 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5222
5223 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5224 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5225 else
5226 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5227 }
5228
5229 /*
5230 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5231 * is an interrupt shadow (block-by-STI or block-by-MOV SS).
5232 */
5233 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5234 && !fIsNestedGuest)
5235 {
5236 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5237
5238 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5239 {
5240 /*
5241 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5242 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5243 */
5244 Assert(!DBGFIsStepping(pVCpu));
5245 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5246 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5247 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5248 AssertRC(rc);
5249 }
5250 else
5251 {
5252 /*
5253 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5254 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5255 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5256 * we use MTF, so just make sure it's called before executing guest-code.
5257 */
5258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5259 }
5260 }
5261 /* else: for nested-guest currently handling while merging controls. */
5262
5263 /*
5264 * Finally, update the guest-interruptibility state.
5265 *
5266 * This is required for the real-on-v86 software interrupt injection, for
5267 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5268 */
5269 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5270 AssertRC(rc);
5271
5272 /*
5273 * There's no need to clear the VM-entry interruption-information field here if we're not
5274 * injecting anything. VT-x clears the valid bit on every VM-exit.
5275 *
5276 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5277 */
5278
5279 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5280 return rcStrict;
5281}
5282
5283
5284/**
5285 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5286 * and update error record fields accordingly.
5287 *
5288 * @returns VMX_IGS_* error codes.
5289 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5290 * wrong with the guest state.
5291 *
5292 * @param pVCpu The cross context virtual CPU structure.
5293 * @param pVmcsInfo The VMCS info. object.
5294 *
5295 * @remarks This function assumes our cache of the VMCS controls
5296 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5297 */
5298static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5299{
5300#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5301#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
5302
5303 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5304 uint32_t uError = VMX_IGS_ERROR;
5305 uint32_t u32IntrState = 0;
5306#ifndef IN_NEM_DARWIN
5307 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5308 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5309#else
5310 bool const fUnrestrictedGuest = true;
5311#endif
5312 do
5313 {
5314 int rc;
5315
5316 /*
5317 * Guest-interruptibility state.
5318 *
5319 * Read this first so that any check that fails prior to those that actually
5320 * require the guest-interruptibility state would still reflect the correct
5321 * VMCS value and avoids causing further confusion.
5322 */
5323 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5324 AssertRC(rc);
5325
5326 uint32_t u32Val;
5327 uint64_t u64Val;
5328
5329 /*
5330 * CR0.
5331 */
5332 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5333 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5334 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5335 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5336 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5337 if (fUnrestrictedGuest)
5338 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5339
5340 uint64_t u64GuestCr0;
5341 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5342 AssertRC(rc);
5343 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5344 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5345 if ( !fUnrestrictedGuest
5346 && (u64GuestCr0 & X86_CR0_PG)
5347 && !(u64GuestCr0 & X86_CR0_PE))
5348 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5349
5350 /*
5351 * CR4.
5352 */
5353 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5354 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5355 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5356
5357 uint64_t u64GuestCr4;
5358 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5359 AssertRC(rc);
5360 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5361 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5362
5363 /*
5364 * IA32_DEBUGCTL MSR.
5365 */
5366 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5367 AssertRC(rc);
5368 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5369 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5370 {
5371 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5372 }
5373 uint64_t u64DebugCtlMsr = u64Val;
5374
5375#ifdef VBOX_STRICT
5376 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5377 AssertRC(rc);
5378 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5379#endif
5380 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5381
5382 /*
5383 * RIP and RFLAGS.
5384 */
5385 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5386 AssertRC(rc);
5387 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5388 if ( !fLongModeGuest
5389 || !pCtx->cs.Attr.n.u1Long)
5390 {
5391 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5392 }
5393 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5394 * must be identical if the "IA-32e mode guest" VM-entry
5395 * control is 1 and CS.L is 1. No check applies if the
5396 * CPU supports 64 linear-address bits. */
5397
5398 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5399 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5400 AssertRC(rc);
5401 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5402 VMX_IGS_RFLAGS_RESERVED);
5403 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5404 uint32_t const u32Eflags = u64Val;
5405
5406 if ( fLongModeGuest
5407 || ( fUnrestrictedGuest
5408 && !(u64GuestCr0 & X86_CR0_PE)))
5409 {
5410 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5411 }
5412
5413 uint32_t u32EntryInfo;
5414 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5415 AssertRC(rc);
5416 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5417 {
5418 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5419 }
5420
5421 /*
5422 * 64-bit checks.
5423 */
5424 if (fLongModeGuest)
5425 {
5426 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5427 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5428 }
5429
5430 if ( !fLongModeGuest
5431 && (u64GuestCr4 & X86_CR4_PCIDE))
5432 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5433
5434 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5435 * 51:32 beyond the processor's physical-address width are 0. */
5436
5437 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5438 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5439 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5440
5441#ifndef IN_NEM_DARWIN
5442 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5443 AssertRC(rc);
5444 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5445
5446 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5447 AssertRC(rc);
5448 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5449#endif
5450
5451 /*
5452 * PERF_GLOBAL MSR.
5453 */
5454 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5455 {
5456 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5457 AssertRC(rc);
5458 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5459 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5460 }
5461
5462 /*
5463 * PAT MSR.
5464 */
5465 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5466 {
5467 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5468 AssertRC(rc);
5469 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5470 for (unsigned i = 0; i < 8; i++)
5471 {
5472 uint8_t u8Val = (u64Val & 0xff);
5473 if ( u8Val != 0 /* UC */
5474 && u8Val != 1 /* WC */
5475 && u8Val != 4 /* WT */
5476 && u8Val != 5 /* WP */
5477 && u8Val != 6 /* WB */
5478 && u8Val != 7 /* UC- */)
5479 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5480 u64Val >>= 8;
5481 }
5482 }
5483
5484 /*
5485 * EFER MSR.
5486 */
5487 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5488 {
5489 Assert(g_fHmVmxSupportsVmcsEfer);
5490 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5491 AssertRC(rc);
5492 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5493 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5494 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5495 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5496 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5497 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5498 * iemVmxVmentryCheckGuestState(). */
5499 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5500 || !(u64GuestCr0 & X86_CR0_PG)
5501 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5502 VMX_IGS_EFER_LMA_LME_MISMATCH);
5503 }
5504
5505 /*
5506 * Segment registers.
5507 */
5508 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5509 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5510 if (!(u32Eflags & X86_EFL_VM))
5511 {
5512 /* CS */
5513 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5514 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5515 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5516 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5517 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5518 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5519 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5520 /* CS cannot be loaded with NULL in protected mode. */
5521 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5522 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5523 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5524 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5525 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5526 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5527 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5528 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5529 else
5530 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5531
5532 /* SS */
5533 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5534 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5535 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5536 if ( !(pCtx->cr0 & X86_CR0_PE)
5537 || pCtx->cs.Attr.n.u4Type == 3)
5538 {
5539 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5540 }
5541
5542 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5543 {
5544 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5545 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5546 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5547 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5548 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5549 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5550 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5551 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5552 }
5553
5554 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5555 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5556 {
5557 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5558 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5559 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5560 || pCtx->ds.Attr.n.u4Type > 11
5561 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5562 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5563 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5564 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5565 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5566 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5567 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5568 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5569 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5570 }
5571 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5572 {
5573 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5574 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5575 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5576 || pCtx->es.Attr.n.u4Type > 11
5577 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5578 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5579 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5580 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5581 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5582 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5583 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5584 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5585 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5586 }
5587 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5588 {
5589 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5590 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5591 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5592 || pCtx->fs.Attr.n.u4Type > 11
5593 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5594 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5595 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5596 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5597 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5598 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5599 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5600 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5601 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5602 }
5603 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5604 {
5605 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5606 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5607 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5608 || pCtx->gs.Attr.n.u4Type > 11
5609 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5610 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5611 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5612 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5613 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5614 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5615 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5616 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5617 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5618 }
5619 /* 64-bit capable CPUs. */
5620 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5621 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5622 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5623 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5624 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5625 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5626 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5627 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5628 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5629 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5630 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5631 }
5632 else
5633 {
5634 /* V86 mode checks. */
5635 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5636 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5637 {
5638 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5639 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5640 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5641 }
5642 else
5643 {
5644 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5645 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5646 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5647 }
5648
5649 /* CS */
5650 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5651 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5652 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5653 /* SS */
5654 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5655 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5656 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5657 /* DS */
5658 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5659 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5660 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5661 /* ES */
5662 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5663 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5664 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5665 /* FS */
5666 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5667 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5668 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5669 /* GS */
5670 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5671 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5672 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5673 /* 64-bit capable CPUs. */
5674 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5675 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5676 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5677 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5678 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5679 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5680 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5681 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5682 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5683 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5684 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5685 }
5686
5687 /*
5688 * TR.
5689 */
5690 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5691 /* 64-bit capable CPUs. */
5692 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5693 if (fLongModeGuest)
5694 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5695 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5696 else
5697 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5698 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5699 VMX_IGS_TR_ATTR_TYPE_INVALID);
5700 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5701 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5702 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5703 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5704 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5705 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5706 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5707 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5708
5709 /*
5710 * GDTR and IDTR (64-bit capable checks).
5711 */
5712 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5713 AssertRC(rc);
5714 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5715
5716 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5717 AssertRC(rc);
5718 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5719
5720 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5721 AssertRC(rc);
5722 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5723
5724 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5725 AssertRC(rc);
5726 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5727
5728 /*
5729 * Guest Non-Register State.
5730 */
5731 /* Activity State. */
5732 uint32_t u32ActivityState;
5733 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5734 AssertRC(rc);
5735 HMVMX_CHECK_BREAK( !u32ActivityState
5736 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5737 VMX_IGS_ACTIVITY_STATE_INVALID);
5738 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5739 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5740
5741 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5742 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5743 {
5744 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5745 }
5746
5747 /** @todo Activity state and injecting interrupts. Left as a todo since we
5748 * currently don't use activity states but ACTIVE. */
5749
5750 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5751 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5752
5753 /* Guest interruptibility-state. */
5754 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5755 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5756 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5757 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5758 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5759 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5760 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5761 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5762 {
5763 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5764 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5765 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5766 }
5767 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5768 {
5769 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5770 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5771 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5772 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5773 }
5774 /** @todo Assumes the processor is not in SMM. */
5775 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5776 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5777 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5778 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5779 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5780 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5781 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5782 {
5783 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5784 }
5785
5786 /* Pending debug exceptions. */
5787 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5788 AssertRC(rc);
5789 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5790 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5791 u32Val = u64Val; /* For pending debug exceptions checks below. */
5792
5793 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5794 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5795 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5796 {
5797 if ( (u32Eflags & X86_EFL_TF)
5798 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5799 {
5800 /* Bit 14 is PendingDebug.BS. */
5801 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5802 }
5803 if ( !(u32Eflags & X86_EFL_TF)
5804 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5805 {
5806 /* Bit 14 is PendingDebug.BS. */
5807 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5808 }
5809 }
5810
5811#ifndef IN_NEM_DARWIN
5812 /* VMCS link pointer. */
5813 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5814 AssertRC(rc);
5815 if (u64Val != UINT64_C(0xffffffffffffffff))
5816 {
5817 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5818 /** @todo Bits beyond the processor's physical-address width MBZ. */
5819 /** @todo SMM checks. */
5820 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5821 Assert(pVmcsInfo->pvShadowVmcs);
5822 VMXVMCSREVID VmcsRevId;
5823 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5824 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5825 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5826 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5827 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5828 }
5829
5830 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5831 * not using nested paging? */
5832 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5833 && !fLongModeGuest
5834 && CPUMIsGuestInPAEModeEx(pCtx))
5835 {
5836 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5837 AssertRC(rc);
5838 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5839
5840 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5841 AssertRC(rc);
5842 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5843
5844 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5845 AssertRC(rc);
5846 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5847
5848 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5849 AssertRC(rc);
5850 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5851 }
5852#endif
5853
5854 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5855 if (uError == VMX_IGS_ERROR)
5856 uError = VMX_IGS_REASON_NOT_FOUND;
5857 } while (0);
5858
5859 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5860 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5861 return uError;
5862
5863#undef HMVMX_ERROR_BREAK
5864#undef HMVMX_CHECK_BREAK
5865}
5866
5867
5868#ifndef HMVMX_USE_FUNCTION_TABLE
5869/**
5870 * Handles a guest VM-exit from hardware-assisted VMX execution.
5871 *
5872 * @returns Strict VBox status code (i.e. informational status codes too).
5873 * @param pVCpu The cross context virtual CPU structure.
5874 * @param pVmxTransient The VMX-transient structure.
5875 */
5876DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5877{
5878#ifdef DEBUG_ramshankar
5879# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5880 do { \
5881 if (a_fSave != 0) \
5882 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5883 VBOXSTRICTRC rcStrict = a_CallExpr; \
5884 if (a_fSave != 0) \
5885 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5886 return rcStrict; \
5887 } while (0)
5888#else
5889# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5890#endif
5891 uint32_t const uExitReason = pVmxTransient->uExitReason;
5892 switch (uExitReason)
5893 {
5894 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5895 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5896 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5897 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5898 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5899 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5900 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5901 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5902 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5903 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5904 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5905 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5906 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5907 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5908 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5909 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5910 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5911 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5912 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5913 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5914 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5915 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5916 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5917 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5918 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5919 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5920 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5921 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5922 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5923 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5924#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5925 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5926 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5927 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5928 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5929 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5930 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5931 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5932 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5933 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5934 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5935#else
5936 case VMX_EXIT_VMCLEAR:
5937 case VMX_EXIT_VMLAUNCH:
5938 case VMX_EXIT_VMPTRLD:
5939 case VMX_EXIT_VMPTRST:
5940 case VMX_EXIT_VMREAD:
5941 case VMX_EXIT_VMRESUME:
5942 case VMX_EXIT_VMWRITE:
5943 case VMX_EXIT_VMXOFF:
5944 case VMX_EXIT_VMXON:
5945 case VMX_EXIT_INVVPID:
5946 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5947#endif
5948#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5949 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5950#else
5951 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5952#endif
5953
5954 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5955 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5956 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5957
5958 case VMX_EXIT_INIT_SIGNAL:
5959 case VMX_EXIT_SIPI:
5960 case VMX_EXIT_IO_SMI:
5961 case VMX_EXIT_SMI:
5962 case VMX_EXIT_ERR_MSR_LOAD:
5963 case VMX_EXIT_ERR_MACHINE_CHECK:
5964 case VMX_EXIT_PML_FULL:
5965 case VMX_EXIT_VIRTUALIZED_EOI:
5966 case VMX_EXIT_GDTR_IDTR_ACCESS:
5967 case VMX_EXIT_LDTR_TR_ACCESS:
5968 case VMX_EXIT_APIC_WRITE:
5969 case VMX_EXIT_RDRAND:
5970 case VMX_EXIT_RSM:
5971 case VMX_EXIT_VMFUNC:
5972 case VMX_EXIT_ENCLS:
5973 case VMX_EXIT_RDSEED:
5974 case VMX_EXIT_XSAVES:
5975 case VMX_EXIT_XRSTORS:
5976 case VMX_EXIT_UMWAIT:
5977 case VMX_EXIT_TPAUSE:
5978 case VMX_EXIT_LOADIWKEY:
5979 default:
5980 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5981 }
5982#undef VMEXIT_CALL_RET
5983}
5984#endif /* !HMVMX_USE_FUNCTION_TABLE */
5985
5986
5987#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5988/**
5989 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5990 *
5991 * @returns Strict VBox status code (i.e. informational status codes too).
5992 * @param pVCpu The cross context virtual CPU structure.
5993 * @param pVmxTransient The VMX-transient structure.
5994 */
5995DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5996{
5997#ifdef DEBUG_ramshankar
5998# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5999 do { \
6000 if (a_fSave != 0) \
6001 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
6002 VBOXSTRICTRC rcStrict = a_CallExpr; \
6003 return rcStrict; \
6004 } while (0)
6005#else
6006# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6007#endif
6008
6009 uint32_t const uExitReason = pVmxTransient->uExitReason;
6010 switch (uExitReason)
6011 {
6012# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6013 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6014 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6015# else
6016 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6017 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6018# endif
6019 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6020 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6021 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6022
6023 /*
6024 * We shouldn't direct host physical interrupts to the nested-guest.
6025 */
6026 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6027
6028 /*
6029 * Instructions that cause VM-exits unconditionally or the condition is
6030 * always taken solely from the nested hypervisor (meaning if the VM-exit
6031 * happens, it's guaranteed to be a nested-guest VM-exit).
6032 *
6033 * - Provides VM-exit instruction length ONLY.
6034 */
6035 case VMX_EXIT_CPUID: /* Unconditional. */
6036 case VMX_EXIT_VMCALL:
6037 case VMX_EXIT_GETSEC:
6038 case VMX_EXIT_INVD:
6039 case VMX_EXIT_XSETBV:
6040 case VMX_EXIT_VMLAUNCH:
6041 case VMX_EXIT_VMRESUME:
6042 case VMX_EXIT_VMXOFF:
6043 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6044 case VMX_EXIT_VMFUNC:
6045 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6046
6047 /*
6048 * Instructions that cause VM-exits unconditionally or the condition is
6049 * always taken solely from the nested hypervisor (meaning if the VM-exit
6050 * happens, it's guaranteed to be a nested-guest VM-exit).
6051 *
6052 * - Provides VM-exit instruction length.
6053 * - Provides VM-exit information.
6054 * - Optionally provides Exit qualification.
6055 *
6056 * Since Exit qualification is 0 for all VM-exits where it is not
6057 * applicable, reading and passing it to the guest should produce
6058 * defined behavior.
6059 *
6060 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6061 */
6062 case VMX_EXIT_INVEPT: /* Unconditional. */
6063 case VMX_EXIT_INVVPID:
6064 case VMX_EXIT_VMCLEAR:
6065 case VMX_EXIT_VMPTRLD:
6066 case VMX_EXIT_VMPTRST:
6067 case VMX_EXIT_VMXON:
6068 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6069 case VMX_EXIT_LDTR_TR_ACCESS:
6070 case VMX_EXIT_RDRAND:
6071 case VMX_EXIT_RDSEED:
6072 case VMX_EXIT_XSAVES:
6073 case VMX_EXIT_XRSTORS:
6074 case VMX_EXIT_UMWAIT:
6075 case VMX_EXIT_TPAUSE:
6076 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6077
6078 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6079 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6080 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6081 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6082 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6083 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6084 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6085 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6086 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6087 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6088 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6089 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6090 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6091 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6092 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6093 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6094 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6095 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6096 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6097
6098 case VMX_EXIT_PREEMPT_TIMER:
6099 {
6100 /** @todo NSTVMX: Preempt timer. */
6101 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6102 }
6103
6104 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6105 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6106
6107 case VMX_EXIT_VMREAD:
6108 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6109
6110 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6111 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6112
6113 case VMX_EXIT_INIT_SIGNAL:
6114 case VMX_EXIT_SIPI:
6115 case VMX_EXIT_IO_SMI:
6116 case VMX_EXIT_SMI:
6117 case VMX_EXIT_ERR_MSR_LOAD:
6118 case VMX_EXIT_ERR_MACHINE_CHECK:
6119 case VMX_EXIT_PML_FULL:
6120 case VMX_EXIT_RSM:
6121 default:
6122 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6123 }
6124#undef VMEXIT_CALL_RET
6125}
6126#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6127
6128
6129/** @name VM-exit helpers.
6130 * @{
6131 */
6132/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6133/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6134/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6135
/** Macro for VM-exits called unexpectedly: records @a a_HmError in the
 *  per-VCPU HM error field and fails with VERR_VMX_UNEXPECTED_EXIT. */
#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
    do { \
        VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
        return VERR_VMX_UNEXPECTED_EXIT; \
    } while (0)
6142
#ifdef VBOX_STRICT
# ifndef IN_NEM_DARWIN
/* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */
# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
    RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()

/* Asserts we are still on the CPU captured by HMVMX_ASSERT_PREEMPT_CPUID_VAR()
   (or that preemption has been enabled in the meantime). */
# define HMVMX_ASSERT_PREEMPT_CPUID() \
    do { \
         RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
         AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
    } while (0)

/* Sanity checks performed on entry to every VM-exit handler: valid pointers,
   plausible exit reason if VM-entry failed, valid VMCS info, interrupts
   enabled, preemption safety and (when ring-3 calls are disabled) that we are
   still on the same CPU. */
# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        AssertPtr((a_pVCpu)); \
        AssertPtr((a_pVmxTransient)); \
        Assert(   (a_pVmxTransient)->fVMEntryFailed == false \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
        Assert((a_pVmxTransient)->pVmcsInfo); \
        Assert(ASMIntAreEnabled()); \
        HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
        HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
        Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
        HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
        if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
            HMVMX_ASSERT_PREEMPT_CPUID(); \
        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    } while (0)
# else
/* NEM/Darwin backend: the preemption / CPU-id checks above do not apply here;
   only basic pointer and transient-state validation is done. */
# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        AssertPtr((a_pVCpu)); \
        AssertPtr((a_pVmxTransient)); \
        Assert(   (a_pVmxTransient)->fVMEntryFailed == false \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
               || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
        Assert((a_pVmxTransient)->pVmcsInfo); \
        Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    } while (0)
# endif

/* Same as HMVMX_VALIDATE_EXIT_HANDLER_PARAMS, but additionally asserts that
   the exit happened while executing a nested-guest. */
# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
        Assert((a_pVmxTransient)->fIsNestedGuest); \
    } while (0)

/* Exception VM-exit handlers only emit a log line in strict builds. */
# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        Log4Func(("\n")); \
    } while (0)
#else
/* Non-strict builds: the validation macros compile down to (nearly) nothing. */
# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { \
        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
        NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
    } while (0)

# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)

# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
#endif
6212
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** Macro that does the necessary privilege checks and intercepted VM-exits for
 * guests that attempted to execute a VMX instruction.  On VINF_HM_PENDING_XCPT
 * an exception has been queued for the guest and the handler returns
 * VINF_SUCCESS so the exception gets delivered; any other failure asserts. */
# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
    do \
    { \
        VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
        if (rcStrictTmp == VINF_SUCCESS) \
        { /* likely */ } \
        else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
        { \
            Assert((a_pVCpu)->hm.s.Event.fPending); \
            Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
            return VINF_SUCCESS; \
        } \
        else \
        { \
            int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
            AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
        } \
    } while (0)

/** Macro that decodes a memory operand for an VM-exit caused by an instruction.
 * On VINF_HM_PENDING_XCPT an exception has been queued and VINF_SUCCESS is
 * returned so it gets delivered; other failures are propagated to the caller. */
# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
    do \
    { \
        VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
                                                         (a_pGCPtrEffAddr)); \
        if (rcStrictTmp == VINF_SUCCESS) \
        { /* likely */ } \
        else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
        { \
            uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
            Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
            NOREF(uXcptTmp); \
            return VINF_SUCCESS; \
        } \
        else \
        { \
            Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
            return rcStrictTmp; \
        } \
    } while (0)
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6257
6258
6259/**
6260 * Advances the guest RIP by the specified number of bytes.
6261 *
6262 * @param pVCpu The cross context virtual CPU structure.
6263 * @param cbInstr Number of bytes to advance the RIP by.
6264 *
6265 * @remarks No-long-jump zone!!!
6266 */
6267DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6268{
6269 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6270
6271 /*
6272 * Advance RIP.
6273 *
6274 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6275 * when the addition causes a "carry" into the upper half and check whether
6276 * we're in 64-bit and can go on with it or wether we should zap the top
6277 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6278 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6279 *
6280 * See PC wrap around tests in bs3-cpu-weird-1.
6281 */
6282 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6283 uint64_t const uRipNext = uRipPrev + cbInstr;
6284 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6285 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6286 pVCpu->cpum.GstCtx.rip = uRipNext;
6287 else
6288 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6289
6290 /*
6291 * Clear RF and interrupt shadowing.
6292 */
6293 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6294 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6295 else
6296 {
6297 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6298 {
6299 /** @todo \#DB - single step. */
6300 }
6301 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6302 }
6303 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6304
6305 /* Mark both RIP and RFLAGS as updated. */
6306 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6307}
6308
6309
6310/**
6311 * Advances the guest RIP after reading it from the VMCS.
6312 *
6313 * @returns VBox status code, no informational status codes.
6314 * @param pVCpu The cross context virtual CPU structure.
6315 * @param pVmxTransient The VMX-transient structure.
6316 *
6317 * @remarks No-long-jump zone!!!
6318 */
6319static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6320{
6321 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6322 /** @todo consider template here after checking callers. */
6323 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6324 AssertRCReturn(rc, rc);
6325
6326 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6327 return VINF_SUCCESS;
6328}
6329
6330
/**
 * Handle a condition that occurred while delivering an event through the guest or
 * nested-guest IDT.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
 *         to continue execution of the guest which will delivery the \#DF.
 * @retval VINF_EM_RESET if we detected a triple-fault condition.
 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 *          Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
 *          is due to an EPT violation, PML full or SPP-related event.
 *
 * @remarks No-long-jump zone!!!
 */
static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
        || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
        || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
        HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);

    VBOXSTRICTRC   rcStrict       = VINF_SUCCESS;
    PCVMXVMCSINFO  pVmcsInfo      = pVmxTransient->pVmcsInfo;
    uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
    uint32_t const uExitIntInfo   = pVmxTransient->uExitIntInfo;
    /* Was an event being delivered through the IDT when this VM-exit occurred? */
    if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
    {
        uint32_t const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
        uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);

        /*
         * If the event was a software interrupt (generated with INT n) or a software exception
         * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
         * can handle the VM-exit and continue guest execution which will re-execute the
         * instruction rather than re-injecting the exception, as that can cause premature
         * trips to ring-3 before injection and involve TRPM which currently has no way of
         * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
         * the problem).
         */
        IEMXCPTRAISE     enmRaise;
        IEMXCPTRAISEINFO fRaiseInfo;
        if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
            || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
            || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
        {
            enmRaise   = IEMXCPTRAISE_REEXEC_INSTR;
            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
        }
        else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
        {
            /* The VM-exit itself reports an exception: let IEM decide how the two events combine
               (reflect, double fault, triple fault, ...). */
            uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
            uint8_t const  uExitVector     = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
            Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);

            uint32_t const fIdtVectorFlags  = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
            uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);

            enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);

            /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
            if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
            {
                pVmxTransient->fVectoringPF = true;
                enmRaise = IEMXCPTRAISE_PREV_EVENT;
            }
        }
        else
        {
            /*
             * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
             * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
             * It is sufficient to reflect the original event to the guest after handling the VM-exit.
             */
            Assert(   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
                   || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
                   || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
            enmRaise   = IEMXCPTRAISE_PREV_EVENT;
            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
        }

        /*
         * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
         * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
         * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
         * subsequent VM-entry would fail, see @bugref{7445}.
         *
         * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
         */
        if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
            && enmRaise == IEMXCPTRAISE_PREV_EVENT
            && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
            && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
            CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);

        switch (enmRaise)
        {
            /* The secondary exception takes priority; nothing to re-inject, just continue. */
            case IEMXCPTRAISE_CURRENT_XCPT:
            {
                Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
                Assert(rcStrict == VINF_SUCCESS);
                break;
            }

            /* Re-inject the original (interrupted) event after the VM-exit is handled. */
            case IEMXCPTRAISE_PREV_EVENT:
            {
                uint32_t u32ErrCode;
                if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
                    u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
                else
                    u32ErrCode = 0;

                /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
                vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
                                     pVCpu->cpum.GstCtx.cr2);

                Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
                          VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
                Assert(rcStrict == VINF_SUCCESS);
                break;
            }

            /* The instruction will simply be re-executed; nothing pending. */
            case IEMXCPTRAISE_REEXEC_INSTR:
                Assert(rcStrict == VINF_SUCCESS);
                break;

            case IEMXCPTRAISE_DOUBLE_FAULT:
            {
                /*
                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
                 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
                 */
                if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
                {
                    pVmxTransient->fVectoringDoublePF = true;
                    Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
                              pVCpu->cpum.GstCtx.cr2));
                    rcStrict = VINF_SUCCESS;
                }
                else
                {
                    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
                    vmxHCSetPendingXcptDF(pVCpu);
                    Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
                              uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
                    rcStrict = VINF_HM_DOUBLE_FAULT;
                }
                break;
            }

            case IEMXCPTRAISE_TRIPLE_FAULT:
            {
                Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
                          VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
                rcStrict = VINF_EM_RESET;
                break;
            }

            case IEMXCPTRAISE_CPU_HANG:
            {
                Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
                rcStrict = VERR_EM_GUEST_CPU_HANG;
                break;
            }

            default:
            {
                AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
                rcStrict = VERR_VMX_IPE_2;
                break;
            }
        }
    }
    /* No event was being delivered through the IDT. If virtual NMIs are in use and NMIs are not
       currently inhibited, check whether we must record NMI-unblocking due to a faulting IRET. */
    else if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
             && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
    {
        if (   VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
            && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
            && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
        {
            /*
             * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
             * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
             * that virtual NMIs remain blocked until the IRET execution is completed.
             *
             * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
             */
            CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
            Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
        }
        else if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
                 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
                 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
        {
            /*
             * Execution of IRET caused an EPT violation, page-modification log-full event or
             * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
             * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
             * that virtual NMIs remain blocked until the IRET execution is completed.
             *
             * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
             */
            if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
            {
                CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
                Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
            }
        }
    }

    Assert(   rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
           || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
    return rcStrict;
}
6553
6554
6555#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6556/**
6557 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6558 * guest attempting to execute a VMX instruction.
6559 *
6560 * @returns Strict VBox status code (i.e. informational status codes too).
6561 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6562 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6563 *
6564 * @param pVCpu The cross context virtual CPU structure.
6565 * @param uExitReason The VM-exit reason.
6566 *
6567 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6568 * @remarks No-long-jump zone!!!
6569 */
6570static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6571{
6572 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6573 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6574
6575 /*
6576 * The physical CPU would have already checked the CPU mode/code segment.
6577 * We shall just assert here for paranoia.
6578 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6579 */
6580 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6581 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6582 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6583
6584 if (uExitReason == VMX_EXIT_VMXON)
6585 {
6586 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6587
6588 /*
6589 * We check CR4.VMXE because it is required to be always set while in VMX operation
6590 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6591 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6592 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6593 */
6594 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6595 {
6596 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6597 vmxHCSetPendingXcptUD(pVCpu);
6598 return VINF_HM_PENDING_XCPT;
6599 }
6600 }
6601 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6602 {
6603 /*
6604 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6605 * (other than VMXON), we need to raise a #UD.
6606 */
6607 Log4Func(("Not in VMX root mode -> #UD\n"));
6608 vmxHCSetPendingXcptUD(pVCpu);
6609 return VINF_HM_PENDING_XCPT;
6610 }
6611
6612 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6613 return VINF_SUCCESS;
6614}
6615
6616
/**
 * Decodes the memory operand of an instruction that caused a VM-exit.
 *
 * The Exit qualification field provides the displacement field for memory
 * operand instructions, if any.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval VINF_SUCCESS if the operand was successfully decoded.
 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
 *         operand.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uExitInstrInfo  The VM-exit instruction information field.
 * @param   enmMemAccess    The memory operand's access type (read or write).
 * @param   GCPtrDisp       The instruction displacement field, if any. For
 *                          RIP-relative addressing pass RIP + displacement here.
 * @param   pGCPtrMem       Where to store the effective destination memory address.
 *
 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
 *          virtual-8086 mode hence skips those checks while verifying if the
 *          segment is valid.
 */
static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
                                          PRTGCPTR pGCPtrMem)
{
    Assert(pGCPtrMem);
    Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
                              | CPUMCTX_EXTRN_CR0);

    /* Indexed by the address-size field of the instruction info (0=16-bit, 1=32-bit, 2=64-bit). */
    static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    /* NOTE(review): the access size below is looked up by the *address*-size field, not an
       operand-size field — presumably fine for the VMX instructions this services; confirm. */
    static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
    AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));

    /* Unpack the VM-exit instruction-information field (base/index registers, scale, segment). */
    VMXEXITINSTRINFO ExitInstrInfo;
    ExitInstrInfo.u  = uExitInstrInfo;
    uint8_t const uAddrSize     = ExitInstrInfo.All.u3AddrSize;
    uint8_t const iSegReg       = ExitInstrInfo.All.iSegReg;
    bool const    fIdxRegValid  = !ExitInstrInfo.All.fIdxRegInvalid;
    uint8_t const iIdxReg       = ExitInstrInfo.All.iIdxReg;
    uint8_t const uScale        = ExitInstrInfo.All.u2Scaling;
    bool const    fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
    uint8_t const iBaseReg      = ExitInstrInfo.All.iBaseReg;
    bool const    fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
    bool const    fIsLongMode   = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);

    /*
     * Validate instruction information.
     * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
     */
    AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
                          ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
    AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
                          ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
    AssertLogRelMsgReturn(fIsMemOperand,
                          ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);

    /*
     * Compute the complete effective address.
     *
     * See AMD instruction spec. 1.4.2 "SIB Byte Format"
     * See AMD spec. 4.5.2 "Segment Registers".
     */
    RTGCPTR GCPtrMem = GCPtrDisp;
    if (fBaseRegValid)
        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
    if (fIdxRegValid)
        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;

    /* Remember the segment-relative offset before adding the base (needed for limit checks below). */
    RTGCPTR const GCPtrOff = GCPtrMem;
    /* In long mode only FS/GS have a non-zero segment base that applies. */
    if (   !fIsLongMode
        || iSegReg >= X86_SREG_FS)
        GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
    GCPtrMem &= s_auAddrSizeMasks[uAddrSize];

    /*
     * Validate effective address.
     * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
     */
    uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
    Assert(cbAccess > 0);
    if (fIsLongMode)
    {
        /* Long mode: only canonicality is checked; no segment limit/permission checks. */
        if (X86_IS_CANONICAL(GCPtrMem))
        {
            *pGCPtrMem = GCPtrMem;
            return VINF_SUCCESS;
        }

        /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
         *        "Data Limit Checks in 64-bit Mode". */
        Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
        vmxHCSetPendingXcptGP(pVCpu, 0);
        return VINF_HM_PENDING_XCPT;
    }

    /*
     * This is a watered down version of iemMemApplySegment().
     * Parts that are not applicable for VMX instructions like real-or-v8086 mode
     * and segment CPL/DPL checks are skipped.
     */
    RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
    RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
    PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];

    /* Check if the segment is present and usable. */
    if (    pSel->Attr.n.u1Present
        && !pSel->Attr.n.u1Unusable)
    {
        Assert(pSel->Attr.n.u1DescType);
        if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
        {
            /* Check permissions for the data segment. */
            if (   enmMemAccess == VMXMEMACCESS_WRITE
                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
            {
                Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
                vmxHCSetPendingXcptGP(pVCpu, iSegReg);
                return VINF_HM_PENDING_XCPT;
            }

            /* Check limits if it's a normal data segment. */
            if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
            {
                if (   GCPtrFirst32 > pSel->u32Limit
                    || GCPtrLast32  > pSel->u32Limit)
                {
                    Log4Func(("Data segment limit exceeded. "
                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
                              GCPtrLast32, pSel->u32Limit));
                    if (iSegReg == X86_SREG_SS)
                        vmxHCSetPendingXcptSS(pVCpu, 0);
                    else
                        vmxHCSetPendingXcptGP(pVCpu, 0);
                    return VINF_HM_PENDING_XCPT;
                }
            }
            else
            {
               /* Check limits if it's an expand-down data segment.
                  Note! The upper boundary is defined by the B bit, not the G bit! */
               if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
                   || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
               {
                   Log4Func(("Expand-down data segment limit exceeded. "
                             "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
                             GCPtrLast32, pSel->u32Limit));
                   if (iSegReg == X86_SREG_SS)
                       vmxHCSetPendingXcptSS(pVCpu, 0);
                   else
                       vmxHCSetPendingXcptGP(pVCpu, 0);
                   return VINF_HM_PENDING_XCPT;
               }
            }
        }
        else
        {
            /* Check permissions for the code segment. */
            if (   enmMemAccess == VMXMEMACCESS_WRITE
                || (   enmMemAccess == VMXMEMACCESS_READ
                    && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
            {
                Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
                Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
                vmxHCSetPendingXcptGP(pVCpu, 0);
                return VINF_HM_PENDING_XCPT;
            }

            /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
            if (   GCPtrFirst32 > pSel->u32Limit
                || GCPtrLast32  > pSel->u32Limit)
            {
                Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
                          GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
                if (iSegReg == X86_SREG_SS)
                    vmxHCSetPendingXcptSS(pVCpu, 0);
                else
                    vmxHCSetPendingXcptGP(pVCpu, 0);
                return VINF_HM_PENDING_XCPT;
            }
        }
    }
    else
    {
        Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
        vmxHCSetPendingXcptGP(pVCpu, 0);
        return VINF_HM_PENDING_XCPT;
    }

    *pGCPtrMem = GCPtrMem;
    return VINF_SUCCESS;
}
6808#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6809
6810
6811/**
6812 * VM-exit helper for LMSW.
6813 */
6814static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6815{
6816 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6817 AssertRCReturn(rc, rc);
6818
6819 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6820 AssertMsg( rcStrict == VINF_SUCCESS
6821 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6822
6823 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6824 if (rcStrict == VINF_IEM_RAISED_XCPT)
6825 {
6826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6827 rcStrict = VINF_SUCCESS;
6828 }
6829
6830 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6831 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6832 return rcStrict;
6833}
6834
6835
6836/**
6837 * VM-exit helper for CLTS.
6838 */
6839static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6840{
6841 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6842 AssertRCReturn(rc, rc);
6843
6844 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6845 AssertMsg( rcStrict == VINF_SUCCESS
6846 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6847
6848 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6849 if (rcStrict == VINF_IEM_RAISED_XCPT)
6850 {
6851 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6852 rcStrict = VINF_SUCCESS;
6853 }
6854
6855 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6856 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6857 return rcStrict;
6858}
6859
6860
6861/**
6862 * VM-exit helper for MOV from CRx (CRx read).
6863 */
6864static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6865{
6866 Assert(iCrReg < 16);
6867 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6868
6869 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6870 AssertRCReturn(rc, rc);
6871
6872 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6873 AssertMsg( rcStrict == VINF_SUCCESS
6874 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6875
6876 if (iGReg == X86_GREG_xSP)
6877 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6878 else
6879 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6880#ifdef VBOX_WITH_STATISTICS
6881 switch (iCrReg)
6882 {
6883 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6884 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6885 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6886 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6887 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6888 }
6889#endif
6890 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6891 return rcStrict;
6892}
6893
6894
6895/**
6896 * VM-exit helper for MOV to CRx (CRx write).
6897 */
6898static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6899{
6900 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6901
6902 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6903 AssertMsg( rcStrict == VINF_SUCCESS
6904 || rcStrict == VINF_IEM_RAISED_XCPT
6905 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6906
6907 switch (iCrReg)
6908 {
6909 case 0:
6910 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6911 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6912 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6913 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6914 break;
6915
6916 case 2:
6917 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6918 /* Nothing to do here, CR2 it's not part of the VMCS. */
6919 break;
6920
6921 case 3:
6922 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6923 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6924 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6925 break;
6926
6927 case 4:
6928 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6929 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6930#ifndef IN_NEM_DARWIN
6931 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6932 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6933#else
6934 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6935#endif
6936 break;
6937
6938 case 8:
6939 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6940 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6941 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6942 break;
6943
6944 default:
6945 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6946 break;
6947 }
6948
6949 if (rcStrict == VINF_IEM_RAISED_XCPT)
6950 {
6951 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6952 rcStrict = VINF_SUCCESS;
6953 }
6954 return rcStrict;
6955}
6956
6957
/**
 * VM-exit exception handler for \#PF (Page-fault exception).
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* Exit qualification holds the faulting linear address for #PF VM-exits. */
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);

#ifndef IN_NEM_DARWIN
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (!VM_IS_VMX_NESTED_PAGING(pVM))
    { /* likely */ }
    else
#endif
    {
        /*
         * Nested-paging (or NEM darwin) case: the #PF is a genuine guest fault, simply
         * re-inject it (or convert to #DF when it occurred while delivering a #PF).
         */
#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
#endif
        VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
        if (!pVmxTransient->fVectoringDoublePF)
        {
            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
                                 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            Assert(!pVmxTransient->fIsNestedGuest);
            vmxHCSetPendingXcptDF(pVCpu);
            Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
        }
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
        return VINF_SUCCESS;
    }

    /* Shadow-paging path from here on. */
    Assert(!pVmxTransient->fIsNestedGuest);

    /* If it's a vectoring #PF, emulate injecting the original event injection as PGMTrap0eHandler() is incapable
       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
    if (pVmxTransient->fVectoringPF)
    {
        Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
              pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));

    /* Hand the fault to PGM; TRPM carries the #PF details across the call. */
    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);

    Log4Func(("#PF: rc=%Rrc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        /*
         * This is typically a shadow page table sync or a MMIO instruction. But we may have
         * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
         */
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
        TRPMResetTrap(pVCpu);
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
        return rc;
    }

    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        if (!pVmxTransient->fVectoringDoublePF)
        {
            /* It's a guest page fault and needs to be reflected to the guest. */
            uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
            TRPMResetTrap(pVCpu);
            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;                 /* In case it's a contributory #PF. */
            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
                                 uGstErrorCode, pVmxTransient->uExitQual);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            TRPMResetTrap(pVCpu);
            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
            vmxHCSetPendingXcptDF(pVCpu);
            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
        }

        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
        return VINF_SUCCESS;
    }

    /* Anything else (e.g. further emulation needed) goes back to the caller. */
    TRPMResetTrap(pVCpu);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
    return rc;
}
7055
7056
7057/**
7058 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7059 *
7060 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7061 */
7062static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7063{
7064 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7065 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7066
7067 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7068 AssertRCReturn(rc, rc);
7069
7070 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7071 {
7072 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7073 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7074
7075 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7076 * provides VM-exit instruction length. If this causes problem later,
7077 * disassemble the instruction like it's done on AMD-V. */
7078 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7079 AssertRCReturn(rc2, rc2);
7080 return rc;
7081 }
7082
7083 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7084 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7085 return VINF_SUCCESS;
7086}
7087
7088
7089/**
7090 * VM-exit exception handler for \#BP (Breakpoint exception).
7091 *
7092 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7093 */
7094static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7095{
7096 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7097 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7098
7099 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7100 AssertRCReturn(rc, rc);
7101
7102 VBOXSTRICTRC rcStrict;
7103 if (!pVmxTransient->fIsNestedGuest)
7104 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7105 else
7106 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7107
7108 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7109 {
7110 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7111 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7112 rcStrict = VINF_SUCCESS;
7113 }
7114
7115 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7116 return rcStrict;
7117}
7118
7119
7120/**
7121 * VM-exit exception handler for \#AC (Alignment-check exception).
7122 *
7123 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7124 */
7125static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7126{
7127 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7128
7129 /*
7130 * Detect #ACs caused by host having enabled split-lock detection.
7131 * Emulate such instructions.
7132 */
7133#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7134 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7135 AssertRCReturn(rc, rc);
7136 /** @todo detect split lock in cpu feature? */
7137 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7138 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7139 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7140 || CPUMGetGuestCPL(pVCpu) != 3
7141 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
7142 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7143 {
7144 /*
7145 * Check for debug/trace events and import state accordingly.
7146 */
7147 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7148 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7149 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7150#ifndef IN_NEM_DARWIN
7151 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7152#endif
7153 )
7154 {
7155 if (pVM->cCpus == 1)
7156 {
7157#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7158 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7159 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7160#else
7161 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7162 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7163#endif
7164 AssertRCReturn(rc, rc);
7165 }
7166 }
7167 else
7168 {
7169 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7170 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7171 AssertRCReturn(rc, rc);
7172
7173 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7174
7175 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7176 {
7177 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7178 if (rcStrict != VINF_SUCCESS)
7179 return rcStrict;
7180 }
7181 }
7182
7183 /*
7184 * Emulate the instruction.
7185 *
7186 * We have to ignore the LOCK prefix here as we must not retrigger the
7187 * detection on the host. This isn't all that satisfactory, though...
7188 */
7189 if (pVM->cCpus == 1)
7190 {
7191 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7192 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7193
7194 /** @todo For SMP configs we should do a rendezvous here. */
7195 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7196 if (rcStrict == VINF_SUCCESS)
7197#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7199 HM_CHANGED_GUEST_RIP
7200 | HM_CHANGED_GUEST_RFLAGS
7201 | HM_CHANGED_GUEST_GPRS_MASK
7202 | HM_CHANGED_GUEST_CS
7203 | HM_CHANGED_GUEST_SS);
7204#else
7205 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7206#endif
7207 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7208 {
7209 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7210 rcStrict = VINF_SUCCESS;
7211 }
7212 return rcStrict;
7213 }
7214 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7215 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7216 return VINF_EM_EMULATE_SPLIT_LOCK;
7217 }
7218
7219 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7220 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7221 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7222
7223 /* Re-inject it. We'll detect any nesting before getting here. */
7224 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7225 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7226 return VINF_SUCCESS;
7227}
7228
7229
/**
 * VM-exit exception handler for \#DB (Debug exception).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);

    /*
     * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
     */
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);

    /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
    uint64_t const uDR6 = X86_DR6_INIT_VAL
                        | (pVmxTransient->uExitQual & (  X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
                                                       | X86_DR6_BD | X86_DR6_BS));
    Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));

    int rc;
    if (!pVmxTransient->fIsNestedGuest)
    {
        /* Let DBGF decide whether this is a hypervisor breakpoint/step or a guest trap. */
        rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);

        /*
         * Prevents stepping twice over the same instruction when the guest is stepping using
         * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
         * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
         */
        if (   rc == VINF_EM_DBG_STEPPED
            && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
        {
            Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
            rc = VINF_EM_RAW_GUEST_TRAP;
        }
    }
    else
        rc = VINF_EM_RAW_GUEST_TRAP;    /* Nested-guest #DBs are always reflected to the guest. */
    Log6Func(("rc=%Rrc\n", rc));
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /*
         * The exception was for the guest. Update DR6, DR7.GD and
         * IA32_DEBUGCTL.LBR before forwarding it.
         * See Intel spec. 27.1 "Architectural State before a VM-Exit"
         * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
         */
#ifndef IN_NEM_DARWIN
        /* Must not be rescheduled/preempted while fiddling with the live DR6. */
        VMMRZCallRing3Disable(pVCpu);
        HM_DISABLE_PREEMPT(pVCpu);

        pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
        pVCpu->cpum.GstCtx.dr[6] |= uDR6;
        if (CPUMIsGuestDebugStateActive(pVCpu))
            ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);    /* Keep the hardware DR6 in sync. */

        HM_RESTORE_PREEMPT();
        VMMRZCallRing3Enable(pVCpu);
#else
        /** @todo */
#endif

        rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);

        /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
        pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;

        /* Paranoia. */
        pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
        pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;

        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
        AssertRC(rc);

        /*
         * Raise #DB in the guest.
         *
         * It is important to reflect exactly what the VM-exit gave us (preserving the
         * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
         * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
         * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
         *
         * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of
         * Intel 386, see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
         */
        vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                             pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
        return VINF_SUCCESS;
    }

    /*
     * Not a guest trap, must be a hypervisor related debug event then.
     * Update DR6 in case someone is interested in it.
     */
    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
    AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
    CPUMSetHyperDR6(pVCpu, uDR6);

    return rc;
}
7333
7334
/**
 * Hacks its way around the lovely mesa driver's backdoor accesses.
 *
 * Called for a \#GP identified by vmxHCIsMesaDrvGp() as the mesa driver poking
 * the VMware backdoor I/O port; instead of injecting the fault we simply step
 * over the offending instruction.
 *
 * @returns VBox status code from advancing the guest RIP.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   pCtx            Pointer to the guest-CPU context (used for logging only).
 *
 * @sa hmR0SvmHandleMesaDrvGp.
 */
static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
{
    LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
    RT_NOREF(pCtx);     /* Only referenced when logging is enabled. */

    /* For now we'll just skip the instruction. */
    return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
}
7348
7349
7350/**
7351 * Checks if the \#GP'ing instruction is the mesa driver doing it's lovely
7352 * backdoor logging w/o checking what it is running inside.
7353 *
7354 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7355 * backdoor port and magic numbers loaded in registers.
7356 *
7357 * @returns true if it is, false if it isn't.
7358 * @sa hmR0SvmIsMesaDrvGp.
7359 */
7360DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7361{
7362 /* 0xed: IN eAX,dx */
7363 uint8_t abInstr[1];
7364 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7365 return false;
7366
7367 /* Check that it is #GP(0). */
7368 if (pVmxTransient->uExitIntErrorCode != 0)
7369 return false;
7370
7371 /* Check magic and port. */
7372 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7373 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7374 if (pCtx->rax != UINT32_C(0x564d5868))
7375 return false;
7376 if (pCtx->dx != UINT32_C(0x5658))
7377 return false;
7378
7379 /* Flat ring-3 CS. */
7380 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7381 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7382 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7383 if (pCtx->cs.Attr.n.u2Dpl != 3)
7384 return false;
7385 if (pCtx->cs.u64Base != 0)
7386 return false;
7387
7388 /* Check opcode. */
7389 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7390 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7391 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7392 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7393 if (RT_FAILURE(rc))
7394 return false;
7395 if (abInstr[0] != 0xed)
7396 return false;
7397
7398 return true;
7399}
7400
7401
/**
 * VM-exit exception handler for \#GP (General-protection exception).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
#ifndef IN_NEM_DARWIN
    /* The real-on-v86 emulation path below only exists in the ring-0 build. */
    PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
    if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    { /* likely */ }
    else
#endif
    {
#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
# ifndef IN_NEM_DARWIN
        Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
# else
        Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
# endif
#endif
        /*
         * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
         * executing a nested-guest, reflect #GP to the guest or nested-guest.
         */
        int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);
        Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
                  pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));

        /* The mesa-driver workaround only applies to non-nested guests when enabled and recognized. */
        if (    pVmxTransient->fIsNestedGuest
            || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
            || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                                 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
        else
            rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
        return rc;
    }

#ifndef IN_NEM_DARWIN
    /* Real-on-v86 mode: emulate the instruction with IEM instead of reflecting the #GP. */
    Assert(CPUMIsGuestInRealModeEx(pCtx));
    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
    Assert(!pVmxTransient->fIsNestedGuest);

    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
    {
        if (!CPUMIsGuestInRealModeEx(pCtx))
        {
            /*
             * The guest is no longer in real-mode, check if we can continue executing the
             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
             */
            pVmcsInfoShared->RealMode.fRealOnV86Active = false;
            if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
            {
                Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
            }
            else
            {
                Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
                rcStrict = VINF_EM_RESCHEDULE;
            }
        }
        else
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    return VBOXSTRICTRC_VAL(rcStrict);
#endif
}
7487
7488
/**
 * VM-exit exception handler for \#DE (Divide Error).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
 */
static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);

    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    /* Start out with a failure status so the re-injection path below triggers
       unless GCM handles the #DE (or GCM isn't interested at all). */
    VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
    if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
    {
        uint8_t cbInstr = 0;
        VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
        if (rc2 == VINF_SUCCESS)
            rcStrict = VINF_SUCCESS;    /* Restart instruction with modified guest register context. */
        else if (rc2 == VERR_NOT_FOUND)
            rcStrict = VERR_NOT_FOUND;  /* Deliver the exception. */
        else
            Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
    }
    else
        rcStrict = VINF_SUCCESS;        /* Do nothing. */

    /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
    if (RT_FAILURE(rcStrict))
    {
        vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                             pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
        rcStrict = VINF_SUCCESS;
    }

    Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
    return VBOXSTRICTRC_VAL(rcStrict);
}
7528
7529
7530/**
7531 * VM-exit exception handler wrapper for all other exceptions that are not handled
7532 * by a specific handler.
7533 *
7534 * This simply re-injects the exception back into the VM without any special
7535 * processing.
7536 *
7537 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7538 */
7539static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7540{
7541 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7542
7543#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7544# ifndef IN_NEM_DARWIN
7545 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7546 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7547 ("uVector=%#x u32XcptBitmap=%#X32\n",
7548 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7549 NOREF(pVmcsInfo);
7550# endif
7551#endif
7552
7553 /*
7554 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7555 * would have been handled while checking exits due to event delivery.
7556 */
7557 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7558
7559#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7560 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7561 AssertRCReturn(rc, rc);
7562 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7563#endif
7564
7565#ifdef VBOX_WITH_STATISTICS
7566 switch (uVector)
7567 {
7568 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7569 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7570 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7571 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7572 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7573 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7574 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7575 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7576 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7577 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7578 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7579 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7580 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7581 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7582 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7583 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7584 default:
7585 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7586 break;
7587 }
7588#endif
7589
7590 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7591 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7592 NOREF(uVector);
7593
7594 /* Re-inject the original exception into the guest. */
7595 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7596 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7597 return VINF_SUCCESS;
7598}
7599
7600
/**
 * VM-exit exception handler for all exceptions (except NMIs!).
 *
 * Dispatches the exception to a vector-specific handler, or to the generic
 * re-injection wrapper, after first resolving any event-delivery interaction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 *
 * @remarks This may be called for both guests and nested-guests. Take care to not
 *          make assumptions and avoid doing anything that is not relevant when
 *          executing a nested-guest (e.g., Mesa driver hacks).
 */
static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);

    /*
     * If this VM-exit occurred while delivering an event through the guest IDT, take
     * action based on the return code and additional hints (e.g. for page-faults)
     * that will be updated in the VMX transient structure.
     */
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * If an exception caused a VM-exit due to delivery of an event, the original
         * event may have to be re-injected into the guest. We shall reinject it and
         * continue guest execution. However, page-fault is a complicated case and
         * needs additional processing done in vmxHCExitXcptPF().
         */
        Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
        uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
        if (   !VCPU_2_VMXSTATE(pVCpu).Event.fPending
            || uVector == X86_XCPT_PF)
        {
            /* Dispatch to the vector-specific handler; everything else is plainly re-injected. */
            switch (uVector)
            {
                case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
                case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
                case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
                case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
                case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
                case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
                case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
                default:
                    return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
            }
        }
        /* else: inject pending event before resuming guest execution. */
    }
    else if (rcStrict == VINF_HM_DOUBLE_FAULT)
    {
        /* A #DF condition was converted into a pending event; resume the guest to deliver it. */
        Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
        rcStrict = VINF_SUCCESS;
    }

    return rcStrict;
}
7654/** @} */
7655
7656
7657/** @name VM-exit handlers.
7658 * @{
7659 */
7660/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7661/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7662/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7663
/**
 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);

#ifndef IN_NEM_DARWIN
    /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
    if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
        return VINF_SUCCESS;    /* Stay in ring-0; the hook handles host interrupt servicing. */
    return VINF_EM_RAW_INTERRUPT;
#else
    return VINF_SUCCESS;
#endif
}
7681
7682
/**
 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
 * VM-exit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);

    vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);

    uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    uint8_t const  uVector      = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));

    /* External interrupts must not land here since we don't use acknowledge-on-exit. */
    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    Assert(   !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
           && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
    NOREF(pVmcsInfo);

    VBOXSTRICTRC rcStrict;
    switch (uExitIntType)
    {
#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
        /*
         * Host physical NMIs:
         *     This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
         *     injected it ourselves and anything we inject is not going to cause a VM-exit directly
         *     for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
         *
         *     See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
         *     See Intel spec. 27.5.5 "Updating Non-Register State".
         */
        case VMX_EXIT_INT_INFO_TYPE_NMI:
        {
            rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
            break;
        }
#endif

        /*
         * Privileged software exceptions (#DB from ICEBP),
         * Software exceptions (#BP and #OF),
         * Hardware exceptions:
         *     Process the required exceptions and resume guest execution if possible.
         */
        case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
            Assert(uVector == X86_XCPT_DB);
            RT_FALL_THRU();
        case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
            RT_FALL_THRU();
        case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
        {
            NOREF(uVector);
            /* Pull in the remaining exception details before dispatching to the handlers. */
            vmxHCReadToTransient<  HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                                 | HMVMX_READ_EXIT_INSTR_LEN
                                 | HMVMX_READ_IDT_VECTORING_INFO
                                 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
            rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
            break;
        }

        default:
        {
            VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
            rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
            AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
            break;
        }
    }

    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
    return rcStrict;
}
7758
7759
/**
 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);

    /* Evaluate and deliver pending events and resume guest execution. */
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
    return VINF_SUCCESS;
}
7775
7776
/**
 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
    {
        AssertMsgFailed(("Unexpected NMI-window exit.\n"));
        HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
    }

    Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));

    /*
     * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
     * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
     */
    uint32_t fIntrState;
    int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
    AssertRC(rc);
    Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
    if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    {
        /* Clear both the cached shadow state and the VMCS interruptibility field. */
        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);

        fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
        rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
        AssertRC(rc);
    }

    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready. */
    vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);

    /* Evaluate and deliver pending events and resume guest execution. */
    return VINF_SUCCESS;
}
7816
7817
/**
 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
 *
 * The instruction itself is treated as a no-op; we only skip past it.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
}
7826
7827
/**
 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
 *
 * The instruction itself is treated as a no-op; we only skip past it.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
}
7836
7837
/**
 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /*
     * Get the state we need and update the exit history entry.
     */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict;
    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (!pExitRec)
    {
        /*
         * Regular CPUID instruction execution.
         */
        rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
        if (rcStrict == VINF_SUCCESS)
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        else if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
            rcStrict = VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
         */
        int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
                                        IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc2, rc2);

        Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));

        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
        /* EMHistoryExec may have executed several instructions; assume everything changed. */
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);

        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
    return rcStrict;
}
7892
7893
7894/**
7895 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7896 */
7897HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7898{
7899 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7900
7901 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7902 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7903 AssertRCReturn(rc, rc);
7904
7905 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7906 return VINF_EM_RAW_EMULATE_INSTR;
7907
7908 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7909 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7910}
7911
7912
/**
 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        /* If we get a spurious VM-exit when TSC offsetting is enabled,
           we must reset offsetting on VM-entry. See @bugref{6634}. */
        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
7941
7942
/**
 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
 *
 * Same as the RDTSC handler, but additionally needs MSR_K8_TSC_AUX.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        /* If we get a spurious VM-exit when TSC offsetting is enabled,
           we must reset offsetting on VM-reentry. See @bugref{6634}. */
        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
7971
7972
7973/**
7974 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7975 */
7976HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7977{
7978 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7979
7980 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7981 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7982 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7983 AssertRCReturn(rc, rc);
7984
7985 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7986 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7987 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7988 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7989 {
7990 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7991 rcStrict = VINF_SUCCESS;
7992 }
7993 return rcStrict;
7994}
7995
7996
7997/**
7998 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7999 */
HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
    if (EMAreHypercallInstructionsEnabled(pVCpu))
    {
        /* Import only what GIM needs to dispatch the hypercall. */
        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
        int rc = vmxHCImportGuestState<  CPUMCTX_EXTRN_RIP
                                       | CPUMCTX_EXTRN_RFLAGS
                                       | CPUMCTX_EXTRN_CR0
                                       | CPUMCTX_EXTRN_SS
                                       | CPUMCTX_EXTRN_CS
                                       | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);

        /* Perform the hypercall. */
        rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
        if (rcStrict == VINF_SUCCESS)
        {
            /* The hypercall completed here; skip past the VMCALL instruction. */
            rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
            AssertRCReturn(rc, rc);
        }
        else
            Assert(   rcStrict == VINF_GIM_R3_HYPERCALL
                   || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
                   || RT_FAILURE(rcStrict));

        /* If the hypercall changes anything other than guest's general-purpose registers,
           we would need to reload the guest changed bits here before VM-entry. */
    }
    else
        Log4Func(("Hypercalls not enabled\n"));

    /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
    if (RT_FAILURE(rcStrict))
    {
        vmxHCSetPendingXcptUD(pVCpu);
        rcStrict = VINF_SUCCESS;
    }

    return rcStrict;
}
8043
8044
8045/**
8046 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8047 */
HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
#ifndef IN_NEM_DARWIN
    /* With nested paging INVLPG is normally not intercepted; only expected here in the debug loop. */
    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
#endif

    /* The exit qualification carries the linear-address operand of INVLPG. */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);

    /* VINF_PGM_SYNC_CR3 is also a success here: the shadow page tables get synced before re-entry. */
    if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    else
        AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
                         VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
8075
8076
8077/**
8078 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8079 */
8080HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8081{
8082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8083
8084 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8085 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8086 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8087 AssertRCReturn(rc, rc);
8088
8089 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8090 if (rcStrict == VINF_SUCCESS)
8091 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8092 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8093 {
8094 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8095 rcStrict = VINF_SUCCESS;
8096 }
8097
8098 return rcStrict;
8099}
8100
8101
8102/**
8103 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8104 */
HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    /* Note: RT_SUCCESS() here accepts informational statuses (e.g. a halt request) too,
       not just VINF_SUCCESS; the dirty flags must be set for all of them. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
    if (RT_SUCCESS(rcStrict))
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        /* If EM says the wait condition is already satisfied, keep executing the guest. */
        if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
            rcStrict = VINF_SUCCESS;
    }

    return rcStrict;
}
8124
8125
8126/**
8127 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8128 * VM-exit.
8129 */
HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* A guest triple fault means the VM must be reset; hand that to EM. */
    return VINF_EM_RESET;
}
8135
8136
8137/**
8138 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8139 */
8140HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8141{
8142 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8143
8144 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8145 AssertRCReturn(rc, rc);
8146
8147 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8148 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8149 rc = VINF_SUCCESS;
8150 else
8151 rc = VINF_EM_HALT;
8152
8153 if (rc != VINF_SUCCESS)
8154 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8155 return rc;
8156}
8157
8158
8159#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8160/**
8161 * VM-exit handler for instructions that result in a \#UD exception delivered to
8162 * the guest.
8163 */
HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* Queue a #UD for injection on the next VM-entry and resume guest execution. */
    vmxHCSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
}
8170#endif
8171
8172
8173/**
8174 * VM-exit handler for expiry of the VMX-preemption timer.
8175 */
8176HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8177{
8178 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8179
8180 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8181 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8182Log12(("vmxHCExitPreemptTimer:\n"));
8183
8184 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8185 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8186 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8187 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8188 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8189}
8190
8191
8192/**
8193 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8194 */
HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    /* Mark RIP/RFLAGS dirty on success, or the exception state if IEM raised an exception. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                                                                           : HM_CHANGED_RAISED_XCPT_MASK);

#ifndef IN_NEM_DARWIN
    /* If the guest's XCR0 now differs from the host's, the VM-entry path must start
       swapping XCR0 around guest execution (and vice versa); refresh the start-VM function. */
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    {
        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
        hmR0VmxUpdateStartVmFunction(pVCpu);
    }
#endif

    return rcStrict;
}
8220
8221
8222/**
8223 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8224 */
HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /** @todo Enable the new code after finding a reliable guest test-case. */
#if 1
    /* Currently always punted to the interpreter; the decoded-IEM path below is disabled. */
    return VERR_EM_INTERPRETER;
#else
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_INFO
                         | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    AssertRCReturn(rc, rc);

    /* Paranoia. Ensure this has a memory operand. */
    Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);

    /* The register operand holds the INVPCID type (64-bit register in long mode, 32-bit otherwise). */
    uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
    Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
                                                         : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;

    /* Decode the memory operand (the INVPCID descriptor address) from the exit info. */
    RTGCPTR GCPtrDesc;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);

    VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
                                                  GCPtrDesc, uType);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
#endif
}
8263
8264
8265/**
8266 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8267 * VM-exit.
8268 */
8269HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8270{
8271 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8272 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8273 AssertRCReturn(rc, rc);
8274
8275 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8276 if (RT_FAILURE(rc))
8277 return rc;
8278
8279 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8280 NOREF(uInvalidReason);
8281
8282#ifdef VBOX_STRICT
8283 uint32_t fIntrState;
8284 uint64_t u64Val;
8285 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8286 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8287 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8288
8289 Log4(("uInvalidReason %u\n", uInvalidReason));
8290 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8291 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8292 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8293
8294 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8295 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8296 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8297 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8298 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8299 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8300 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8301 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8302 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8303 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8304 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8305 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8306# ifndef IN_NEM_DARWIN
8307 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8308 {
8309 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8310 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8311 }
8312
8313 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8314# endif
8315#endif
8316
8317 return VERR_VMX_INVALID_GUEST_STATE;
8318}
8319
8320/**
8321 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8322 */
HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /*
     * Cumulative notes of all recognized but unexpected VM-exits.
     *
     * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
     *    nested-paging is used.
     *
     * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
     *    emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
     *    this function (and thereby stop VM execution) for handling such instructions.
     *
     *
     * VMX_EXIT_INIT_SIGNAL:
     *    INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
     *    It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
     *    VM-exits. However, we should not receive INIT signals VM-exit while executing a VM.
     *
     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery"
     *    See Intel spec. 29.3 "VMX Instructions" for "VMXON".
     *    See Intel spec. "23.8 Restrictions on VMX operation".
     *
     * VMX_EXIT_SIPI:
     *    SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
     *    activity state is used. We don't make use of it as our guests don't have direct
     *    access to the host local APIC.
     *
     *    See Intel spec. 25.3 "Other Causes of VM-exits".
     *
     * VMX_EXIT_IO_SMI:
     * VMX_EXIT_SMI:
     *    This can only happen if we support dual-monitor treatment of SMI, which can be
     *    activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
     *    monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
     *    VMX root mode or receive an SMI. If we get here, something funny is going on.
     *
     *    See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
     *    See Intel spec. 25.3 "Other Causes of VM-Exits"
     *
     * VMX_EXIT_ERR_MSR_LOAD:
     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are
     *    unexpected and typically indicate a bug in the hypervisor code. We thus cannot
     *    resume execution.
     *
     *    See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
     *
     * VMX_EXIT_ERR_MACHINE_CHECK:
     *    Machine check exceptions indicates a fatal/unrecoverable hardware condition
     *    including but not limited to system bus, ECC, parity, cache and TLB errors. A
     *    #MC exception abort class exception is raised. We thus cannot assume a
     *    reasonable chance of continuing any sort of execution and we bail.
     *
     *    See Intel spec. 15.1 "Machine-check Architecture".
     *    See Intel spec. 27.1 "Architectural State Before A VM Exit".
     *
     * VMX_EXIT_PML_FULL:
     * VMX_EXIT_VIRTUALIZED_EOI:
     * VMX_EXIT_APIC_WRITE:
     *    We do not currently support any of these features and thus they are all unexpected
     *    VM-exits.
     *
     * VMX_EXIT_GDTR_IDTR_ACCESS:
     * VMX_EXIT_LDTR_TR_ACCESS:
     * VMX_EXIT_RDRAND:
     * VMX_EXIT_RSM:
     * VMX_EXIT_VMFUNC:
     * VMX_EXIT_ENCLS:
     * VMX_EXIT_RDSEED:
     * VMX_EXIT_XSAVES:
     * VMX_EXIT_XRSTORS:
     * VMX_EXIT_UMWAIT:
     * VMX_EXIT_TPAUSE:
     * VMX_EXIT_LOADIWKEY:
     *    These VM-exits are -not- caused unconditionally by execution of the corresponding
     *    instruction. Any VM-exit for these instructions indicate a hardware problem,
     *    unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
     *
     *    See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
     */
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
}
8406
8407
8408/**
8409 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8410 */
HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);

    /** @todo Optimize this: We currently drag in the whole MSR state
     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
     * MSRs required. That would require changes to IEM and possibly CPUM too.
     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
    int rc;
    /* FS/GS base MSRs need the full segment register imported in addition to the MSR mask. */
    switch (idMsr)
    {
        default:
            rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
                                                                                                            __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;
        case MSR_K8_FS_BASE:
            rc = vmxHCImportGuestState<  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
                                       | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;
        case MSR_K8_GS_BASE:
            rc = vmxHCImportGuestState<  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
                                       | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;
    }

    Log4Func(("ecx=%#RX32\n", idMsr));

#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
    /* Sanity: with MSR bitmaps active, MSRs we pass through should never cause a RDMSR VM-exit. */
    Assert(!pVmxTransient->fIsNestedGuest);
    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    {
        if (   hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
            && idMsr != MSR_K6_EFER)
        {
            AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
        }
        if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
        {
            Assert(pVmcsInfo->pvMsrBitmap);
            uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
            if (fMsrpm & VMXMSRPM_ALLOW_RD)
            {
                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
                HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
            }
        }
    }
#endif

    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
    if (rcStrict == VINF_SUCCESS)
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    else
        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
                  ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));

    return rcStrict;
}
8483
8484
8485/**
8486 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8487 */
HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);

    /*
     * The FS and GS base MSRs are not part of the above all-MSRs mask.
     * Although we don't need to fetch the base as it will be overwritten shortly, while
     * loading guest-state we would also load the entire segment register including limit
     * and attributes and thus we need to load them here.
     */
    /** @todo Optimize this: We currently drag in the whole MSR state
     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
     * MSRs required. That would require changes to IEM and possibly CPUM too.
     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
    int rc;
    switch (idMsr)
    {
        default:
            rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
                                                                                                            __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;

        case MSR_K8_FS_BASE:
            rc = vmxHCImportGuestState<  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
                                       | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;
        case MSR_K8_GS_BASE:
            rc = vmxHCImportGuestState<  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
                                       | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
            AssertRCReturn(rc, rc);
            break;
    }
    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));

    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);

    if (rcStrict == VINF_SUCCESS)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);

        /* If this is an X2APIC WRMSR access, update the APIC state as well. */
        if (    idMsr == MSR_IA32_APICBASE
            || (   idMsr >= MSR_IA32_X2APIC_START
                && idMsr <= MSR_IA32_X2APIC_END))
        {
            /*
             * We've already saved the APIC related guest-state (TPR) in post-run phase.
             * When full APIC register virtualization is implemented we'll have to make
             * sure APIC state is saved from the VMCS before IEM changes it.
             */
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
        }
        else if (idMsr == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
        else if (idMsr == MSR_K6_EFER)
        {
            /*
             * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
             * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
             * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
             */
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
        }

        /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
        if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
        {
            switch (idMsr)
            {
                case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
                case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
                case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
                case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS);               break;
                case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS);               break;
                case MSR_K6_EFER:           /* Nothing to do, already handled above. */                                               break;
                default:
                {
#ifndef IN_NEM_DARWIN
                    /* Lazy-restore and auto-load MSRs need their respective dirty flags set for reloading. */
                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
                    else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
#else
                    AssertMsgFailed(("TODO\n"));
#endif
                    break;
                }
            }
        }
#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
        else
        {
            /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
            switch (idMsr)
            {
                case MSR_IA32_SYSENTER_CS:
                case MSR_IA32_SYSENTER_EIP:
                case MSR_IA32_SYSENTER_ESP:
                case MSR_K8_FS_BASE:
                case MSR_K8_GS_BASE:
                {
                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
                    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
                }

                /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
                default:
                {
                    if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
                    {
                        /* EFER MSR writes are always intercepted. */
                        if (idMsr != MSR_K6_EFER)
                        {
                            AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
                                             idMsr));
                            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
                        }
                    }

                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
                    {
                        Assert(pVmcsInfo->pvMsrBitmap);
                        uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
                        if (fMsrpm & VMXMSRPM_ALLOW_WR)
                        {
                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
                            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
                        }
                    }
                    break;
                }
            }
        }
#endif  /* VBOX_STRICT */
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    else
        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
                  ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));

    return rcStrict;
}
8641
8642
8643/**
8644 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8645 */
8646HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8647{
8648 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8649
8650 /** @todo The guest has likely hit a contended spinlock. We might want to
8651 * poke a schedule different guest VCPU. */
8652 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8653 if (RT_SUCCESS(rc))
8654 return VINF_EM_RAW_INTERRUPT;
8655
8656 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8657 return rc;
8658}
8659
8660
8661/**
8662 * VM-exit handler for when the TPR value is lowered below the specified
8663 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8664 */
HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* This exit can only occur when the TPR-shadow feature is in use. */
    Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);

    /*
     * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
     * We'll re-evaluate pending interrupts and inject them before the next VM
     * entry so we can just continue execution here.
     */
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
    return VINF_SUCCESS;
}
8678
8679
8680/**
8681 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8682 * VM-exit.
8683 *
8684 * @retval VINF_SUCCESS when guest execution can continue.
8685 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8686 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8687 * incompatible guest state for VMX execution (real-on-v86 case).
8688 */
8689HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8690{
8691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8692 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8693
8694 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8695 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8696 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8697
8698 VBOXSTRICTRC rcStrict;
8699 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8700 uint64_t const uExitQual = pVmxTransient->uExitQual;
8701 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8702 switch (uAccessType)
8703 {
8704 /*
8705 * MOV to CRx.
8706 */
8707 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8708 {
8709 /*
8710 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8711 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8712 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8713 * PAE PDPTEs as well.
8714 */
8715 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8716 AssertRCReturn(rc, rc);
8717
8718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8719#ifndef IN_NEM_DARWIN
8720 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8721#endif
8722 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8723 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8724
8725 /*
8726 * MOV to CR3 only cause a VM-exit when one or more of the following are true:
8727 * - When nested paging isn't used.
8728 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8729 * - We are executing in the VM debug loop.
8730 */
8731#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8732# ifndef IN_NEM_DARWIN
8733 Assert( iCrReg != 3
8734 || !VM_IS_VMX_NESTED_PAGING(pVM)
8735 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8736 || pVCpu->hmr0.s.fUsingDebugLoop);
8737# else
8738 Assert( iCrReg != 3
8739 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8740# endif
8741#endif
8742
8743 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8744 Assert( iCrReg != 8
8745 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8746
8747 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8748 AssertMsg( rcStrict == VINF_SUCCESS
8749 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8750
8751#ifndef IN_NEM_DARWIN
8752 /*
8753 * This is a kludge for handling switches back to real mode when we try to use
8754 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8755 * deal with special selector values, so we have to return to ring-3 and run
8756 * there till the selector values are V86 mode compatible.
8757 *
8758 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8759 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8760 * this function.
8761 */
8762 if ( iCrReg == 0
8763 && rcStrict == VINF_SUCCESS
8764 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8765 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8766 && (uOldCr0 & X86_CR0_PE)
8767 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8768 {
8769 /** @todo Check selectors rather than returning all the time. */
8770 Assert(!pVmxTransient->fIsNestedGuest);
8771 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8772 rcStrict = VINF_EM_RESCHEDULE_REM;
8773 }
8774#endif
8775
8776 break;
8777 }
8778
8779 /*
8780 * MOV from CRx.
8781 */
8782 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8783 {
8784 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8785 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8786
8787 /*
8788 * MOV from CR3 only cause a VM-exit when one or more of the following are true:
8789 * - When nested paging isn't used.
8790 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8791 * - We are executing in the VM debug loop.
8792 */
8793#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8794# ifndef IN_NEM_DARWIN
8795 Assert( iCrReg != 3
8796 || !VM_IS_VMX_NESTED_PAGING(pVM)
8797 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8798 || pVCpu->hmr0.s.fLeaveDone);
8799# else
8800 Assert( iCrReg != 3
8801 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8802# endif
8803#endif
8804
8805 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8806 Assert( iCrReg != 8
8807 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8808
8809 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8810 break;
8811 }
8812
8813 /*
8814 * CLTS (Clear Task-Switch Flag in CR0).
8815 */
8816 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8817 {
8818 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8819 break;
8820 }
8821
8822 /*
8823 * LMSW (Load Machine-Status Word into CR0).
8824 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8825 */
8826 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8827 {
8828 RTGCPTR GCPtrEffDst;
8829 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8830 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8831 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8832 if (fMemOperand)
8833 {
8834 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8835 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8836 }
8837 else
8838 GCPtrEffDst = NIL_RTGCPTR;
8839 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8840 break;
8841 }
8842
8843 default:
8844 {
8845 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8846 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8847 }
8848 }
8849
8850 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8851 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8852 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8853
8854 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8855 NOREF(pVM);
8856 return rcStrict;
8857}
8858
8859
/**
 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
 * VM-exit.
 *
 * Handles both plain IN/OUT and string INS/OUTS forms.  Non-stepping exits are
 * first checked against the EM exit history; frequent/probe-worthy exits are
 * routed to EMHistoryExec, everything else is handled here via IEM (strings)
 * or IOM (plain port I/O).  After a successful access, armed I/O breakpoints
 * (DR7 / DBGF) are checked and a pending \#DB queued if one triggered.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    /* Register set needed for decoding and executing the I/O instruction below. */
#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
    /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
    int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
    uint32_t const uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
    uint8_t const  uIOSize      = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
    bool const     fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
    bool const     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    bool const     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    bool const     fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    /* Size encoding 2 is invalid; 0/1/3 map to 1/2/4 byte accesses (see s_aIOSizes below). */
    AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);

    /*
     * Update exit history to see if this exit can be optimized.
     * (Skipped while the guest or the debugger is single-stepping.)
     */
    VBOXSTRICTRC rcStrict;
    PCEMEXITREC  pExitRec = NULL;
    if (   !fGstStepping
        && !fDbgStepping)
        pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                    !fIOString
                                                    ? !fIOWrite
                                                      ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
                                                      : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
                                                    : !fIOWrite
                                                      ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
                                                      : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
                                                    pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (!pExitRec)
    {
        static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                    /* Size of the I/O accesses in bytes. */
        static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff };   /* AND masks for saving result in AL/AX/EAX. */

        uint32_t const cbValue = s_aIOSizes[uIOSize];
        uint32_t const cbInstr = pVmxTransient->cbExitInstr;
        bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
        if (fIOString)
        {
            /*
             * INS/OUTS - I/O String instruction.
             *
             * Use instruction-information if available, otherwise fall back on
             * interpreting the instruction.
             */
            Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
            AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
            bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
            if (fInsOutsInfo)
            {
                vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
                AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
                AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
                IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
                bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
                if (fIOWrite)
                    rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
                                                    pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
                else
                {
                    /*
                     * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
                     * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
                     * See Intel Instruction spec. for "INS".
                     * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
                     */
                    rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
                }
            }
            else
                rcStrict = IEMExecOne(pVCpu);

            /* IEM has already advanced RIP for us. */
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
            fUpdateRipAlready = true;
        }
        else
        {
            /*
             * IN/OUT - I/O instruction.
             */
            Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
            uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
            if (fIOWrite)
            {
                rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
#ifndef IN_NEM_DARWIN
                /* Defer ring-3 writes via the pending-I/O mechanism unless the guest is single-stepping. */
                if (    rcStrict == VINF_IOM_R3_IOPORT_WRITE
                    && !pCtx->eflags.Bits.u1TF)
                    rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
#endif
            }
            else
            {
                uint32_t u32Result = 0;
                rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
                if (IOM_SUCCESS(rcStrict))
                {
                    /* Save result of I/O IN instr. in AL/AX/EAX. */
                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
                }
#ifndef IN_NEM_DARWIN
                /* Defer ring-3 reads via the pending-I/O mechanism unless the guest is single-stepping. */
                if (    rcStrict == VINF_IOM_R3_IOPORT_READ
                    && !pCtx->eflags.Bits.u1TF)
                    rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
#endif
                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
            }
        }

        if (IOM_SUCCESS(rcStrict))
        {
            if (!fUpdateRipAlready)
            {
                vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
            }

            /*
             * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru
             * while booting Fedora 17 64-bit guest.
             *
             * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
             */
            if (fIOString)
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);

            /*
             * If any I/O breakpoints are armed, we need to check if one triggered
             * and take appropriate action.
             * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
             */
#if 1
            /* DR7 is part of the initial import mask above, so it is already valid here. */
            AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
#else
            AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
            rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
            AssertRCReturn(rc, rc);
#endif

            /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
             *        execution engines about whether hyper BPs and such are pending. */
            uint32_t const uDr7 = pCtx->dr[7];
            if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
                                && X86_DR7_ANY_RW_IO(uDr7)
                                && (pCtx->cr4 & X86_CR4_DE))
                            || DBGFBpIsHwIoArmed(pVM)))
            {
                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);

#ifndef IN_NEM_DARWIN
                /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
                VMMRZCallRing3Disable(pVCpu);
                HM_DISABLE_PREEMPT(pVCpu);

                bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);

                VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
                if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
                {
                    /* Raise #DB. */
                    if (fIsGuestDbgActive)
                        ASMSetDR6(pCtx->dr[6]);
                    if (pCtx->dr[7] != uDr7)
                        VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;

                    vmxHCSetPendingXcptDB(pVCpu);
                }
                /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
                   however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
                else if (   rcStrict2 != VINF_SUCCESS
                         && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
                    rcStrict = rcStrict2;
                AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);

                HM_RESTORE_PREEMPT();
                VMMRZCallRing3Enable(pVCpu);
#else
                /** @todo */
#endif
            }
        }

#ifdef VBOX_STRICT
        if (   rcStrict == VINF_IOM_R3_IOPORT_READ
            || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
            Assert(!fIOWrite);
        else if (   rcStrict == VINF_IOM_R3_IOPORT_WRITE
                 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
            Assert(fIOWrite);
        else
        {
# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
       *        statuses, that the VMM device and some others may return. See
       *        IOM_SUCCESS() for guidance. */
            AssertMsg(   RT_FAILURE(rcStrict)
                      || rcStrict == VINF_SUCCESS
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_DBG_BREAKPOINT
                      || rcStrict == VINF_EM_RAW_GUEST_TRAP
                      || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
# endif
        }
#endif
        STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
    }
    else
    {
        /*
         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
         */
        int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
                                        VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc2, rc2);
        STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
                         : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
              fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));

        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);

        Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
    return rcStrict;
}
9107
9108
9109/**
9110 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9111 * VM-exit.
9112 */
9113HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9114{
9115 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9116
9117 /* Check if this task-switch occurred while delivery an event through the guest IDT. */
9118 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9119 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9120 {
9121 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9122 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9123 {
9124 uint32_t uErrCode;
9125 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9126 {
9127 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9128 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9129 }
9130 else
9131 uErrCode = 0;
9132
9133 RTGCUINTPTR GCPtrFaultAddress;
9134 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9135 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9136 else
9137 GCPtrFaultAddress = 0;
9138
9139 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9140
9141 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9142 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9143
9144 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9145 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9146 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9147 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9148 }
9149 }
9150
9151 /* Fall back to the interpreter to emulate the task-switch. */
9152 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9153 return VERR_EM_INTERPRETER;
9154}
9155
9156
9157/**
9158 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9159 */
9160HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9161{
9162 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9163
9164 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9165 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9166 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9167 AssertRC(rc);
9168 return VINF_EM_DBG_STEPPED;
9169}
9170
9171
/**
 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
 *
 * Reflects linear-mode APIC-page accesses to the MMIO handler (ring-0 only);
 * all other access types fall back to full instruction emulation.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);

    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN
                         | HMVMX_READ_EXIT_INTERRUPTION_INFO
                         | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                         | HMVMX_READ_IDT_VECTORING_INFO
                         | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);

    /*
     * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
     */
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
        if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
        {
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
            return VINF_EM_RAW_INJECT_TRPM_EVENT;
        }
    }
    else
    {
        Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
        return rcStrict;
    }

    /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
    uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
    switch (uAccessType)
    {
#ifndef IN_NEM_DARWIN
        case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
        case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
        {
            /* TPR accesses should have been handled by the TPR shadow, not here. */
            AssertMsg(   !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
                      ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));

            /* Compose the guest-physical address of the access from the APIC base and the page offset. */
            RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
            GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
            Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
                      VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));

            rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
                                            uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
            Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            if (   rcStrict == VINF_SUCCESS
                || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
                || rcStrict == VERR_PAGE_NOT_PRESENT)
            {
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                                                    | HM_CHANGED_GUEST_APIC_TPR);
                rcStrict = VINF_SUCCESS;
            }
            break;
        }
#else
        /** @todo */
#endif

        default:
        {
            /* Guest-physical and other access types: punt to full instruction emulation. */
            Log4Func(("uAccessType=%#x\n", uAccessType));
            rcStrict = VINF_EM_RAW_EMULATE_INSTR;
            break;
        }
    }

    if (rcStrict != VINF_SUCCESS)
        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
    return rcStrict;
}
9258
9259
/**
 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
 * VM-exit.
 *
 * For non-nested guests this may switch to the guest debug state and disable
 * MOV DRx intercepts so the instruction can be restarted; otherwise the
 * access is emulated via IEM.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

    /*
     * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
     * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
     * must emulate the MOV DRx access.
     */
    if (!pVmxTransient->fIsNestedGuest)
    {
        /* We should -not- get this VM-exit if the guest's debug registers were active. */
        if (   pVmxTransient->fWasGuestDebugStateActive
#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
            && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
#endif
           )
        {
            AssertMsgFailed(("Unexpected MOV DRx exit\n"));
            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
        }

        if (   !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
            && !pVmxTransient->fWasHyperDebugStateActive)
        {
            Assert(!DBGFIsStepping(pVCpu));
            Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));

            /* Whether we disable intercepting MOV DRx instructions and resume
               the current one, or emulate it and keep intercepting them is
               configurable.  Though it usually comes down to whether there are
               any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
            bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
#else
            bool const fResumeInstruction = true;
#endif
            if (fResumeInstruction)
            {
                /* Stop intercepting MOV DRx so the restarted instruction runs natively. */
                pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
                int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
                AssertRC(rc);
            }

#ifndef IN_NEM_DARWIN
            /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
            VMMRZCallRing3Disable(pVCpu);
            HM_DISABLE_PREEMPT(pVCpu);

            /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
            CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
            Assert(CPUMIsGuestDebugStateActive(pVCpu));

            HM_RESTORE_PREEMPT();
            VMMRZCallRing3Enable(pVCpu);
#else
            CPUMR3NemActivateGuestDebugState(pVCpu);
            Assert(CPUMIsGuestDebugStateActive(pVCpu));
            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
#endif

            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
            if (fResumeInstruction)
            {
#ifdef VBOX_WITH_STATISTICS
                vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
                if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
                    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
                else
                    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
#endif
                /* Resume the guest; it will re-execute the MOV DRx without exiting. */
                return VINF_SUCCESS;
            }
        }
    }

    /*
     * Import state.  We must have DR7 loaded here as it's always consulted,
     * both for reading and writing.  The other debug registers are never
     * exported as such.
     */
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    int rc = vmxHCImportGuestState<  IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                                   | CPUMCTX_EXTRN_GPRS_MASK
                                   | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    uint8_t const iGReg  = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
    uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
    Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
              VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));

    VBOXSTRICTRC rcStrict;
    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    {
        /*
         * Write DRx register.
         */
        rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
        AssertMsg(   rcStrict == VINF_SUCCESS
                  || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        if (rcStrict == VINF_SUCCESS)
        {
            /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
             *        kept it for now to avoid breaking something non-obvious. */
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                                                | HM_CHANGED_GUEST_DR7);
            /* Update the DR6 register if guest debug state is active, otherwise we'll
               trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
            if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
                ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
            Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
                      iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
        }
        else if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
            rcStrict = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
    }
    else
    {
        /*
         * Read DRx register into a general purpose register.
         */
        rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
        AssertMsg(   rcStrict == VINF_SUCCESS
                  || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        if (rcStrict == VINF_SUCCESS)
        {
            /* Reading into RSP additionally dirties the stack pointer state. */
            if (iGReg == X86_GREG_xSP)
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                                                    | HM_CHANGED_GUEST_RSP);
            else
                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        }
        else if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
            rcStrict = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
    }

    return rcStrict;
}
9416
9417
/**
 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
 * Conditional VM-exit.
 *
 * An EPT misconfig typically means an MMIO access; this routes the access
 * through PGMR0Trap0eHandlerNPMisconfig, or through EMHistoryExec for
 * frequent/probe-worthy exits.  Ring-0 only; must not be reached on the
 * NEM/darwin backend.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmxTransient   The VMX-transient structure.
 */
HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

#ifndef IN_NEM_DARWIN
    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);

    vmxHCReadToTransient<  HMVMX_READ_EXIT_INSTR_LEN
                         | HMVMX_READ_EXIT_INTERRUPTION_INFO
                         | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                         | HMVMX_READ_IDT_VECTORING_INFO
                         | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                         | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);

    /*
     * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
     */
    VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        /*
         * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
         * instruction emulation to inject the original event. Otherwise, injecting the original event
         * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
         */
        if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
        { /* likely */ }
        else
        {
            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
            /** @todo NSTVMX: Think about how this should be handled. */
            if (pVmxTransient->fIsNestedGuest)
                return VERR_VMX_IPE_3;
# endif
            return VINF_EM_RAW_INJECT_TRPM_EVENT;
        }
    }
    else
    {
        Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
        return rcStrict;
    }

    /*
     * Get sufficient state and update the exit history entry.
     */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
    AssertRCReturn(rc, rc);

    RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (!pExitRec)
    {
        /*
         * If we succeed, resume guest execution.
         * If we fail in interpreting the instruction because we couldn't get the guest physical address
         * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
         * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
         * weird case. See @bugref{6043}.
         */
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
/** @todo bird: We can probably just go straight to IOM here and assume that
 *        it's MMIO, then fall back on PGM if that hunch didn't work out so
 *        well.  However, we need to address that aliasing workarounds that
 *        PGMR0Trap0eHandlerNPMisconfig implements.  So, some care is needed.
 *
 *        Might also be interesting to see if we can get this done more or
 *        less locklessly inside IOM.  Need to consider the lookup table
 *        updating and use a bit more carefully first (or do all updates via
 *        rendezvous) */
        rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
        Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
        if (   rcStrict == VINF_SUCCESS
            || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
            || rcStrict == VERR_PAGE_NOT_PRESENT)
        {
            /* Successfully handled MMIO operation. */
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                                                | HM_CHANGED_GUEST_APIC_TPR);
            rcStrict = VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Frequent exit or something needing probing. Call EMHistoryExec.
         */
        Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));

        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);

        Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
    return rcStrict;
#else
    AssertFailed();
    return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
#endif
}
9529
9530
9531/**
9532 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9533 * VM-exit.
9534 */
9535HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9536{
9537 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9538#ifndef IN_NEM_DARWIN
9539 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9540
9541 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9542 | HMVMX_READ_EXIT_INSTR_LEN
9543 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9544 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9545 | HMVMX_READ_IDT_VECTORING_INFO
9546 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9547 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9548
9549 /*
9550 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9551 */
9552 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9554 {
9555 /*
9556 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9557 * we shall resolve the nested #PF and re-inject the original event.
9558 */
9559 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9560 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9561 }
9562 else
9563 {
9564 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9565 return rcStrict;
9566 }
9567
9568 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9569 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9570 AssertRCReturn(rc, rc);
9571
9572 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9573 uint64_t const uExitQual = pVmxTransient->uExitQual;
9574 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9575
9576 RTGCUINT uErrorCode = 0;
9577 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9578 uErrorCode |= X86_TRAP_PF_ID;
9579 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9580 uErrorCode |= X86_TRAP_PF_RW;
9581 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9582 uErrorCode |= X86_TRAP_PF_P;
9583
9584 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9585 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9586
9587 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9588
9589 /*
9590 * Handle the pagefault trap for the nested shadow table.
9591 */
9592 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9593 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9594 TRPMResetTrap(pVCpu);
9595
9596 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9597 if ( rcStrict == VINF_SUCCESS
9598 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9599 || rcStrict == VERR_PAGE_NOT_PRESENT)
9600 {
9601 /* Successfully synced our nested page tables. */
9602 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9603 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9604 return VINF_SUCCESS;
9605 }
9606 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9607 return rcStrict;
9608
9609#else /* IN_NEM_DARWIN */
9610 PVM pVM = pVCpu->CTX_SUFF(pVM);
9611 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9612 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9613 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9614 vmxHCImportGuestRip(pVCpu);
9615 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9616
9617 /*
9618 * Ask PGM for information about the given GCPhys. We need to check if we're
9619 * out of sync first.
9620 */
9621 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9622 false,
9623 false };
9624 PGMPHYSNEMPAGEINFO Info;
9625 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9626 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9627 if (RT_SUCCESS(rc))
9628 {
9629 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9630 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9631 {
9632 if (State.fCanResume)
9633 {
9634 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9635 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9636 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9637 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9638 State.fDidSomething ? "" : " no-change"));
9639 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9640 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9641 return VINF_SUCCESS;
9642 }
9643 }
9644
9645 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9646 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9647 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9648 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9649 State.fDidSomething ? "" : " no-change"));
9650 }
9651 else
9652 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9653 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9654 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9655
9656 /*
9657 * Emulate the memory access, either access handler or special memory.
9658 */
9659 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9660 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9661 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9662 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9663 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9664
9665 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9666 AssertRCReturn(rc, rc);
9667
9668 VBOXSTRICTRC rcStrict;
9669 if (!pExitRec)
9670 rcStrict = IEMExecOne(pVCpu);
9671 else
9672 {
9673 /* Frequent access or probing. */
9674 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9675 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9676 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9677 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9678 }
9679
9680 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9681
9682 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9683 return rcStrict;
9684#endif /* IN_NEM_DARWIN */
9685}
9686
9687#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9688
9689/**
9690 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9691 */
9692HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9693{
9694 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9695
9696 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9697 | HMVMX_READ_EXIT_INSTR_INFO
9698 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9699 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9700 | CPUMCTX_EXTRN_SREG_MASK
9701 | CPUMCTX_EXTRN_HWVIRT
9702 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9703 AssertRCReturn(rc, rc);
9704
9705 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9706
9707 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9708 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9709
9710 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9711 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9712 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9713 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9714 {
9715 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9716 rcStrict = VINF_SUCCESS;
9717 }
9718 return rcStrict;
9719}
9720
9721
9722/**
9723 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9724 */
9725HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9726{
9727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9728
9729 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9730 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9731 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9732 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9733 AssertRCReturn(rc, rc);
9734
9735 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9736
9737 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9738 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9739 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9740 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9741 {
9742 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9743 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9744 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9745 }
9746 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9747 return rcStrict;
9748}
9749
9750
9751/**
9752 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9753 */
9754HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9755{
9756 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9757
9758 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9759 | HMVMX_READ_EXIT_INSTR_INFO
9760 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9761 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9762 | CPUMCTX_EXTRN_SREG_MASK
9763 | CPUMCTX_EXTRN_HWVIRT
9764 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9765 AssertRCReturn(rc, rc);
9766
9767 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9768
9769 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9770 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9771
9772 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9773 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9774 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9775 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9776 {
9777 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9778 rcStrict = VINF_SUCCESS;
9779 }
9780 return rcStrict;
9781}
9782
9783
9784/**
9785 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9786 */
9787HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9788{
9789 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9790
9791 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9792 | HMVMX_READ_EXIT_INSTR_INFO
9793 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9794 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9795 | CPUMCTX_EXTRN_SREG_MASK
9796 | CPUMCTX_EXTRN_HWVIRT
9797 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9798 AssertRCReturn(rc, rc);
9799
9800 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9801
9802 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9803 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9804
9805 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9806 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9807 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9808 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9809 {
9810 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9811 rcStrict = VINF_SUCCESS;
9812 }
9813 return rcStrict;
9814}
9815
9816
9817/**
9818 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9819 */
9820HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9821{
9822 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9823
9824 /*
9825 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9826 * thus might not need to import the shadow VMCS state, it's safer just in case
9827 * code elsewhere dares look at unsynced VMCS fields.
9828 */
9829 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9830 | HMVMX_READ_EXIT_INSTR_INFO
9831 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9832 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9833 | CPUMCTX_EXTRN_SREG_MASK
9834 | CPUMCTX_EXTRN_HWVIRT
9835 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9836 AssertRCReturn(rc, rc);
9837
9838 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9839
9840 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9841 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9842 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9843
9844 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9845 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9846 {
9847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9848
9849# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9850 /* Try for exit optimization. This is on the following instruction
9851 because it would be a waste of time to have to reinterpret the
9852 already decoded vmwrite instruction. */
9853 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9854 if (pExitRec)
9855 {
9856 /* Frequent access or probing. */
9857 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9858 AssertRCReturn(rc, rc);
9859
9860 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9861 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9862 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9863 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9864 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9865 }
9866# endif
9867 }
9868 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9869 {
9870 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9871 rcStrict = VINF_SUCCESS;
9872 }
9873 return rcStrict;
9874}
9875
9876
9877/**
9878 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9879 */
9880HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9881{
9882 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9883
9884 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9885 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9886 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9887 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9888 AssertRCReturn(rc, rc);
9889
9890 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9891
9892 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9893 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9894 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9895 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9896 {
9897 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9898 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9899 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9900 }
9901 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9902 return rcStrict;
9903}
9904
9905
9906/**
9907 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9908 */
9909HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9910{
9911 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9912
9913 /*
9914 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9915 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9916 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9917 */
9918 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9919 | HMVMX_READ_EXIT_INSTR_INFO
9920 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9921 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9922 | CPUMCTX_EXTRN_SREG_MASK
9923 | CPUMCTX_EXTRN_HWVIRT
9924 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9925 AssertRCReturn(rc, rc);
9926
9927 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9928
9929 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9930 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9931 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9932
9933 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9934 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9935 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9936 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9937 {
9938 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9939 rcStrict = VINF_SUCCESS;
9940 }
9941 return rcStrict;
9942}
9943
9944
9945/**
9946 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9947 */
9948HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9949{
9950 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9951
9952 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9953 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9954 | CPUMCTX_EXTRN_HWVIRT
9955 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9956 AssertRCReturn(rc, rc);
9957
9958 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9959
9960 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9961 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9962 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9963 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9964 {
9965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9966 rcStrict = VINF_SUCCESS;
9967 }
9968 return rcStrict;
9969}
9970
9971
9972/**
9973 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9974 */
9975HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9976{
9977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9978
9979 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9980 | HMVMX_READ_EXIT_INSTR_INFO
9981 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9982 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9983 | CPUMCTX_EXTRN_SREG_MASK
9984 | CPUMCTX_EXTRN_HWVIRT
9985 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9986 AssertRCReturn(rc, rc);
9987
9988 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9989
9990 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9991 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9992
9993 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9994 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9995 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9996 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9997 {
9998 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9999 rcStrict = VINF_SUCCESS;
10000 }
10001 return rcStrict;
10002}
10003
10004
10005/**
10006 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10007 */
10008HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10009{
10010 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10011
10012 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10013 | HMVMX_READ_EXIT_INSTR_INFO
10014 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10015 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10016 | CPUMCTX_EXTRN_SREG_MASK
10017 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10018 AssertRCReturn(rc, rc);
10019
10020 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10021
10022 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10023 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10024
10025 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10026 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10027 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10028 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10029 {
10030 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10031 rcStrict = VINF_SUCCESS;
10032 }
10033 return rcStrict;
10034}
10035
10036
10037# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10038/**
10039 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10040 */
10041HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10042{
10043 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10044
10045 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10046 | HMVMX_READ_EXIT_INSTR_INFO
10047 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10048 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10049 | CPUMCTX_EXTRN_SREG_MASK
10050 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10051 AssertRCReturn(rc, rc);
10052
10053 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10054
10055 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10056 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10057
10058 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10059 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10061 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10062 {
10063 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10064 rcStrict = VINF_SUCCESS;
10065 }
10066 return rcStrict;
10067}
10068# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10069#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10070/** @} */
10071
10072
10073#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10074/** @name Nested-guest VM-exit handlers.
10075 * @{
10076 */
10077/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10078/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10079/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10080
10081/**
10082 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10083 * Conditional VM-exit.
10084 */
10085HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10086{
10087 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10088
10089 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10090
10091 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10092 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10093 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10094
10095 switch (uExitIntType)
10096 {
10097# ifndef IN_NEM_DARWIN
10098 /*
10099 * Physical NMIs:
10100 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
10101 */
10102 case VMX_EXIT_INT_INFO_TYPE_NMI:
10103 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10104# endif
10105
10106 /*
10107 * Hardware exceptions,
10108 * Software exceptions,
10109 * Privileged software exceptions:
10110 * Figure out if the exception must be delivered to the guest or the nested-guest.
10111 */
10112 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10113 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10114 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10115 {
10116 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10117 | HMVMX_READ_EXIT_INSTR_LEN
10118 | HMVMX_READ_IDT_VECTORING_INFO
10119 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10120
10121 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10122 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10123 {
10124 /* Exit qualification is required for debug and page-fault exceptions. */
10125 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10126
10127 /*
10128 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10129 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10130 * length. However, if delivery of a software interrupt, software exception or privileged
10131 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10132 */
10133 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10134 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10135 pVmxTransient->uExitIntErrorCode,
10136 pVmxTransient->uIdtVectoringInfo,
10137 pVmxTransient->uIdtVectoringErrorCode);
10138#ifdef DEBUG_ramshankar
10139 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10140 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10141 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10142 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10143 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10144 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10145#endif
10146 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10147 }
10148
10149 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10150 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10151 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10152 }
10153
10154 /*
10155 * Software interrupts:
10156 * VM-exits cannot be caused by software interrupts.
10157 *
10158 * External interrupts:
10159 * This should only happen when "acknowledge external interrupts on VM-exit"
10160 * control is set. However, we never set this when executing a guest or
10161 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10162 * the guest.
10163 */
10164 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10165 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10166 default:
10167 {
10168 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10169 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10170 }
10171 }
10172}
10173
10174
10175/**
10176 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10177 * Unconditional VM-exit.
10178 */
10179HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10180{
10181 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10182 return IEMExecVmxVmexitTripleFault(pVCpu);
10183}
10184
10185
10186/**
10187 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10188 */
10189HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10190{
10191 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10192
10193 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10194 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10195 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10196}
10197
10198
10199/**
10200 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10201 */
10202HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10203{
10204 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10205
10206 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10207 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10208 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10209}
10210
10211
10212/**
10213 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10214 * Unconditional VM-exit.
10215 */
10216HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10217{
10218 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10219
10220 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10221 | HMVMX_READ_EXIT_INSTR_LEN
10222 | HMVMX_READ_IDT_VECTORING_INFO
10223 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10224
10225 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10226 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10227 pVmxTransient->uIdtVectoringErrorCode);
10228 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10229}
10230
10231
10232/**
10233 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10234 */
10235HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10236{
10237 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10238
10239 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10240 {
10241 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10242 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10243 }
10244 return vmxHCExitHlt(pVCpu, pVmxTransient);
10245}
10246
10247
10248/**
10249 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10250 */
10251HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10252{
10253 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10254
10255 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10256 {
10257 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10258 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10259 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10260 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10261 }
10262 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10263}
10264
10265
10266/**
10267 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10268 */
10269HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10270{
10271 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10272
10273 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10274 {
10275 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10276 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10277 }
10278 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10279}
10280
10281
/**
 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* This handler serves both VMREAD and VMWRITE exits; anything else is a caller bug. */
    Assert(   pVmxTransient->uExitReason == VMX_EXIT_VMREAD
           || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);

    /* Instruction info is needed first to locate the register holding the VMCS field encoding. */
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);

    uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
    Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;

    /* Outside long mode only the lower 32 bits of the field encoding are used. */
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
    if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
        u64VmcsField &= UINT64_C(0xffffffff);

    /* Reflect to the nested hypervisor if it intercepts this particular VMCS field access
       (depends on exit reason, shadow-VMCS usage and the VMREAD/VMWRITE bitmaps). */
    if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
    {
        vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                             | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
        VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
        return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    }

    /* Not intercepted: emulate the instruction on behalf of the nested guest. */
    if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
        return vmxHCExitVmread(pVCpu, pVmxTransient);
    return vmxHCExitVmwrite(pVCpu, pVmxTransient);
}
10315
10316
10317/**
10318 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10319 */
10320HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10321{
10322 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10323
10324 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10325 {
10326 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10327 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10328 }
10329
10330 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10331}
10332
10333
/**
 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);

    VBOXSTRICTRC rcStrict;
    uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
    switch (uAccessType)
    {
        /*
         * MOV to CRx: whether the nested hypervisor sees the exit depends on which
         * control register is written and on its guest/host masks & intercepts.
         */
        case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
        {
            uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
            uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
            Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
            uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;

            bool fIntercept;
            switch (iCrReg)
            {
                case 0:
                case 4:
                    /* CR0/CR4 writes are intercepted based on the guest/host masks and read shadows. */
                    fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
                    break;

                case 3:
                    /* CR3 writes depend on "CR3-load exiting" and the CR3-target value list. */
                    fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
                    break;

                case 8:
                    /* CR8 writes depend solely on the "CR8-load exiting" control. */
                    fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
                    break;

                default:
                    fIntercept = false;
                    break;
            }
            if (fIntercept)
            {
                VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
                rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
            }
            else
            {
                /* Not intercepted by the nested hypervisor: emulate the write ourselves. */
                int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
                AssertRCReturn(rc, rc);
                rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
            }
            break;
        }

        case VMX_EXIT_QUAL_CRX_ACCESS_READ:
        {
            /*
             * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
             * CR2 reads do not cause a VM-exit.
             * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
             * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
             */
            uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
            if (   iCrReg == 3
                || iCrReg == 8)
            {
                /* Table indexed by CR number; only CR3 and CR8 have store intercept controls. */
                static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
                                                                  0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
                uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
                if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
                {
                    VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
                    rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
                }
                else
                {
                    uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
                    rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
                }
            }
            else
            {
                /* Hardware should never report a MOV-from for CR0/CR2/CR4 (see above). */
                AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
                HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
            }
            break;
        }

        case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
        {
            /* CLTS clears CR0.TS; it is intercepted only when the nested hypervisor owns
               CR0.TS (guest/host mask) and its read shadow has TS set. */
            PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
            uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
            uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
            if (   (uGstHostMask & X86_CR0_TS)
                && (uReadShadow & X86_CR0_TS))
            {
                VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
                rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
            }
            else
                rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
            break;
        }

        case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
        {
            RTGCPTR GCPtrEffDst;
            uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
            bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
            if (fMemOperand)
            {
                /* Memory-operand form: the guest-linear address must be passed along on reflection. */
                vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
                GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
            }
            else
                GCPtrEffDst = NIL_RTGCPTR;

            if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
            {
                VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
                ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
                rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
            }
            else
                rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
            break;
        }

        default:
        {
            AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
        }
    }

    /* IEM raised an exception instead of completing: mark context dirty and continue normally. */
    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
10478
10479
10480/**
10481 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10482 * Conditional VM-exit.
10483 */
10484HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10485{
10486 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10487
10488 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10489 {
10490 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10491 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10492 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10493 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10494 }
10495 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10496}
10497
10498
/**
 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);

    uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
    uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
    /* Size encoding 2 is not defined; only 0 (byte), 1 (word) and 3 (dword) are valid. */
    AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);

    static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
    uint8_t const cbAccess = s_aIOSizes[uIOSize];
    /* The intercept depends on "unconditional I/O exiting" / "use I/O bitmaps" and the bitmaps. */
    if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
    {
        /*
         * IN/OUT instruction:
         *   - Provides VM-exit instruction length.
         *
         * INS/OUTS instruction:
         *   - Provides VM-exit instruction length.
         *   - Provides Guest-linear address.
         *   - Optionally provides VM-exit instruction info (depends on CPU feature).
         */
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
        vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);

        /* Make sure we don't use stale/uninitialized VMX-transient info. below. */
        pVmxTransient->ExitInstrInfo.u = 0;
        pVmxTransient->uGuestLinearAddr = 0;

        bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
        bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
        if (fIOString)
        {
            vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
            if (fVmxInsOutsInfo)
            {
                Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
                vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
            }
        }

        VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
        return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    }
    /* Not intercepted by the nested hypervisor: handle as a regular guest I/O exit. */
    return vmxHCExitIoInstr(pVCpu, pVmxTransient);
}
10550
10551
10552/**
10553 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10554 */
10555HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10556{
10557 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10558
10559 uint32_t fMsrpm;
10560 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10561 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10562 else
10563 fMsrpm = VMXMSRPM_EXIT_RD;
10564
10565 if (fMsrpm & VMXMSRPM_EXIT_RD)
10566 {
10567 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10568 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10569 }
10570 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10571}
10572
10573
10574/**
10575 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10576 */
10577HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10578{
10579 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10580
10581 uint32_t fMsrpm;
10582 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10583 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10584 else
10585 fMsrpm = VMXMSRPM_EXIT_WR;
10586
10587 if (fMsrpm & VMXMSRPM_EXIT_WR)
10588 {
10589 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10590 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10591 }
10592 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10593}
10594
10595
10596/**
10597 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10598 */
10599HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10600{
10601 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10602
10603 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10604 {
10605 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10606 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10607 }
10608 return vmxHCExitMwait(pVCpu, pVmxTransient);
10609}
10610
10611
/**
 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
 * VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
    /* MTF exits are always reflected to the nested hypervisor as a trap-like exit,
       carrying the pending debug exceptions along. */
    vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
    VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
    return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
}
10625
10626
10627/**
10628 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10629 */
10630HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10631{
10632 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10633
10634 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10635 {
10636 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10637 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10638 }
10639 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10640}
10641
10642
10643/**
10644 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10645 */
10646HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10647{
10648 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10649
10650 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10651 * PAUSE when executing a nested-guest? If it does not, we would not need
10652 * to check for the intercepts here. Just call VM-exit... */
10653
10654 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10655 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10656 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10657 {
10658 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10659 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10660 }
10661 return vmxHCExitPause(pVCpu, pVmxTransient);
10662}
10663
10664
10665/**
10666 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10667 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10668 */
10669HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10670{
10671 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10672
10673 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10674 {
10675 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10676 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10677 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10678 }
10679 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10680}
10681
10682
/**
 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
 * VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* IDT-vectoring info is needed because the access may have happened while
       delivering an event, which must be accounted for on reflection. */
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN
                         | HMVMX_READ_IDT_VECTORING_INFO
                         | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);

    /* This exit only occurs when the nested hypervisor enabled virtual-APIC accesses. */
    Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));

    Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
              VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));

    VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
    VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
                                                                            pVmxTransient->uIdtVectoringErrorCode);
    return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
}
10706
10707
/**
 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* APIC-write exits only occur with APIC-register virtualization; always reflect
       them to the nested hypervisor with the exit qualification (the APIC offset). */
    Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
    return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
}
10720
10721
/**
 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* Virtualized-EOI exits only occur with virtual-interrupt delivery; always reflect
       them to the nested hypervisor with the exit qualification (the EOI'd vector). */
    Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
    vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
    return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
}
10734
10735
10736/**
10737 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10738 */
10739HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10740{
10741 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10742
10743 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10744 {
10745 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10746 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10747 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10748 }
10749 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10750}
10751
10752
10753/**
10754 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10755 */
10756HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10757{
10758 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10759
10760 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10761 {
10762 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10763 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10764 }
10765 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10766}
10767
10768
10769/**
10770 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10771 */
10772HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10773{
10774 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10775
10776 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10777 {
10778 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10779 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10780 | HMVMX_READ_EXIT_INSTR_INFO
10781 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10782 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10783 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10784 }
10785 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10786}
10787
10788
/**
 * Nested-guest VM-exit handler for invalid-guest state
 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /*
     * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
     * So if it does happen, it indicates a bug possibly in the hardware-assisted VMX code.
     * Handle it like it's in an invalid guest state of the outer guest.
     *
     * When the fast path is implemented, this should be changed to cause the corresponding
     * nested-guest VM-exit.
     */
    /* Simply delegate to the non-nested error handler. */
    return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
}
10807
10808
/**
 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
 * and only provide the instruction length.
 *
 * Unconditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

#ifdef VBOX_STRICT
    /* Strict builds sanity-check that the corresponding intercept control really is
       set for exit reasons that are only conditional on a control bit. */
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    switch (pVmxTransient->uExitReason)
    {
        case VMX_EXIT_ENCLS:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
            break;

        case VMX_EXIT_VMFUNC:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
            break;
    }
#endif

    /* Only the instruction length is needed; reflect the exit unconditionally. */
    vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
    return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
}
10836
10837
/**
 * Nested-guest VM-exit handler for instructions that provide instruction length as
 * well as more information.
 *
 * Unconditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

# ifdef VBOX_STRICT
    /* Strict builds sanity-check that the intercept control matching the exit reason
       is actually set in the nested hypervisor's VMCS controls. */
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    switch (pVmxTransient->uExitReason)
    {
        case VMX_EXIT_GDTR_IDTR_ACCESS:
        case VMX_EXIT_LDTR_TR_ACCESS:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
            break;

        case VMX_EXIT_RDRAND:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
            break;

        case VMX_EXIT_RDSEED:
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
            break;

        case VMX_EXIT_XSAVES:
        case VMX_EXIT_XRSTORS:
            /** @todo NSTVMX: Verify XSS-bitmap. */
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
            break;

        case VMX_EXIT_UMWAIT:
        case VMX_EXIT_TPAUSE:
            Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
            Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
            break;

        case VMX_EXIT_LOADIWKEY:
            Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
            break;
    }
# endif

    /* Reflect the exit with full qualification + instruction length + instruction info. */
    vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                         | HMVMX_READ_EXIT_INSTR_LEN
                         | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
    VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
    return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
}
10889
10890# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10891
/**
 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
    {
        vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
                             | HMVMX_READ_EXIT_INSTR_LEN
                             | HMVMX_READ_EXIT_INTERRUPTION_INFO
                             | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
                             | HMVMX_READ_IDT_VECTORING_INFO
                             | HMVMX_READ_IDT_VECTORING_ERROR_CODE
                             | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
        /* The nested page-walk below needs the full guest context. */
        int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);

        /*
         * If it's our VMEXIT, we're responsible for re-injecting any event which delivery
         * might have triggered this VMEXIT. If we forward the problem to the inner VMM,
         * it's its problem to deal with that issue and we'll clear the recovered event.
         */
        VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /*likely*/ }
        else
        {
            Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
            return rcStrict;
        }
        uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */

        RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
        uint64_t const uExitQual = pVmxTransient->uExitQual;

        /* The guest-linear address is only valid for certain access types (bit in the qualification). */
        RTGCPTR GCPtrNestedFault;
        bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
        if (fIsLinearAddrValid)
        {
            vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
            GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
        }
        else
            GCPtrNestedFault = 0;

        /* Translate the EPT-violation qualification bits into #PF-style error-code bits for PGM. */
        RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
                            | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
                            | ((uExitQual & (  VMX_EXIT_QUAL_EPT_ENTRY_READ
                                             | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
                                             | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);

        PGMPTWALK Walk;
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
                                                        fIsLinearAddrValid, GCPtrNestedFault, &Walk);
        Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
                  uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
        if (RT_SUCCESS(rcStrict))
        {
            /* PGM handled it: the fault was ours (shadow EPT), not the nested hypervisor's. */
            if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
            {
                Assert(!fClearEventOnForward);
                Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
                rcStrict = VINF_EM_RESCHEDULE_REM;
            }
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
            return rcStrict;
        }

        /* Forwarding to the nested hypervisor: drop the recovered pending event (see note above). */
        if (fClearEventOnForward)
            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;

        VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
                                                                                pVmxTransient->uIdtVectoringErrorCode);
        if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
        {
            VMXVEXITINFO const ExitInfo
                = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
                                                                              pVmxTransient->uExitQual,
                                                                              pVmxTransient->cbExitInstr,
                                                                              pVmxTransient->uGuestLinearAddr,
                                                                              pVmxTransient->uGuestPhysicalAddr);
            return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
        }

        /* Walk failure that isn't a violation must be an EPT misconfiguration. */
        Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
        return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
    }

    /* The nested hypervisor doesn't use EPT: handle as a regular guest EPT violation. */
    return vmxHCExitEptViolation(pVCpu, pVmxTransient);
}
10988
10989
/**
 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);

    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
    {
        vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
        /* The nested page-walk below needs the full guest context. */
        int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
        AssertRCReturn(rc, rc);

        /* Let PGM walk the nested/shadow EPT tables; X86_TRAP_PF_RSVD signals misconfig
           and no linear address is available for this exit type. */
        PGMPTWALK Walk;
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
        VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
                                                                     GCPhysNestedFault, false /* fIsLinearAddrValid */,
                                                                     0 /* GCPtrNestedFault */, &Walk);
        if (RT_SUCCESS(rcStrict))
        {
            /* Our shadow tables are programmed so PGM should never succeed here. */
            AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
            return rcStrict;
        }

        /* Walk failed with a misconfig as expected: reflect to the nested hypervisor,
           including any event that was being delivered when the fault hit. */
        AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
        vmxHCReadToTransient<  HMVMX_READ_IDT_VECTORING_INFO
                             | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);

        VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
                                                                                pVmxTransient->uIdtVectoringErrorCode);
        return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
    }

    /* The nested hypervisor doesn't use EPT: handle as a regular guest EPT misconfig. */
    return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
}
11029
11030# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11031
11032/** @} */
11033#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11034
11035
11036/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11037 * probes.
11038 *
11039 * The following few functions and associated structure contains the bloat
11040 * necessary for providing detailed debug events and dtrace probes as well as
11041 * reliable host side single stepping. This works on the principle of
11042 * "subclassing" the normal execution loop and workers. We replace the loop
11043 * method completely and override selected helpers to add necessary adjustments
11044 * to their core operation.
11045 *
11046 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11047 * any performance for debug and analysis features.
11048 *
11049 * @{
11050 */
11051
/**
 * Transient per-VCPU debug state of VMCS and related info. we save/restore in
 * the debug run loop.
 */
typedef struct VMXRUNDBGSTATE
{
    /** The RIP we started executing at. This is for detecting that we stepped. */
    uint64_t uRipStart;
    /** The CS we started executing with. */
    uint16_t uCsStart;

    /** Whether we've actually modified the 1st execution control field. */
    bool fModifiedProcCtls : 1;
    /** Whether we've actually modified the 2nd execution control field. */
    bool fModifiedProcCtls2 : 1;
    /** Whether we've actually modified the exception bitmap. */
    bool fModifiedXcptBitmap : 1;

    /** We desire the modified the CR0 mask to be cleared. */
    bool fClearCr0Mask : 1;
    /** We desire the modified the CR4 mask to be cleared. */
    bool fClearCr4Mask : 1;
    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
    uint32_t fCpe1Extra;
    /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
    uint32_t fCpe1Unwanted;
    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
    uint32_t fCpe2Extra;
    /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
    uint32_t bmXcptExtra;
    /** The sequence number of the Dtrace provider settings the state was
     *  configured against. */
    uint32_t uDtraceSettingsSeqNo;
    /** VM-exits to check (one bit per VM-exit).
     * Sized for VMX_EXIT_MAX+1 bits; see the AssertCompileMemberSize below. */
    uint32_t bmExitsToCheck[3];

    /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
    uint32_t fProcCtlsInitial;
    /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
    uint32_t fProcCtls2Initial;
    /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
    uint32_t bmXcptInitial;
} VMXRUNDBGSTATE;
/* Ensure the exit bitmap covers every defined VM-exit reason. */
AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11097
11098
11099/**
11100 * Initializes the VMXRUNDBGSTATE structure.
11101 *
11102 * @param pVCpu The cross context virtual CPU structure of the
11103 * calling EMT.
11104 * @param pVmxTransient The VMX-transient structure.
11105 * @param pDbgState The debug state to initialize.
11106 */
11107static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11108{
11109 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11110 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11111
11112 pDbgState->fModifiedProcCtls = false;
11113 pDbgState->fModifiedProcCtls2 = false;
11114 pDbgState->fModifiedXcptBitmap = false;
11115 pDbgState->fClearCr0Mask = false;
11116 pDbgState->fClearCr4Mask = false;
11117 pDbgState->fCpe1Extra = 0;
11118 pDbgState->fCpe1Unwanted = 0;
11119 pDbgState->fCpe2Extra = 0;
11120 pDbgState->bmXcptExtra = 0;
11121 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11122 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11123 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11124}
11125
11126
/**
 * Updates the VMCS fields with changes requested by @a pDbgState.
 *
 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well
 * immediately before executing guest code, i.e. when interrupts are disabled.
 * We don't check status codes here as we cannot easily assert or return in the
 * latter case.
 *
 * @param pVCpu           The cross context virtual CPU structure.
 * @param pVmxTransient   The VMX-transient structure.
 * @param pDbgState       The debug state.
 */
static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
{
    /*
     * Ensure desired flags in VMCS control fields are set.
     * (Ignoring write failure here, as we're committed and it's just debug extras.)
     *
     * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
     * there should be no stale data in pCtx at this point.
     */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    /* Primary processor-based controls: add fCpe1Extra, strip fCpe1Unwanted. */
    if (   (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
        || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
    {
        pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
        pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
        VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
        pDbgState->fModifiedProcCtls   = true;
    }

    /* Secondary processor-based controls: only additions are requested here. */
    if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
    {
        pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
        VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
        pDbgState->fModifiedProcCtls2  = true;
    }

    /* Exception bitmap: set any extra exception intercepts requested. */
    if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
    {
        pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
        VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
        Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
        pDbgState->fModifiedXcptBitmap = true;
    }

    /* Clear the CR0 guest/host mask if requested, so CR0 accesses don't trap. */
    if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
    {
        pVmcsInfo->u64Cr0Mask = 0;
        VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
        Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
    }

    /* Ditto for the CR4 guest/host mask. */
    if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
    {
        pVmcsInfo->u64Cr4Mask = 0;
        VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
        Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
    }

    NOREF(pVCpu);
}
11191
11192
/**
 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
 * re-entry next time around.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param pVCpu           The cross context virtual CPU structure.
 * @param pVmxTransient   The VMX-transient structure.
 * @param pDbgState       The debug state.
 * @param rcStrict        The return code from executing the guest using single
 *                        stepping.
 */
static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
                                             VBOXSTRICTRC rcStrict)
{
    /*
     * Restore VM-exit control settings as we may not reenter this function the
     * next time around.
     */
    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;

    /* We reload the initial value, trigger what we can of recalculations the
       next time around.  From the looks of things, that's all that's required atm. */
    if (pDbgState->fModifiedProcCtls)
    {
        if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
            pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
        int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
        AssertRC(rc2);
        pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
    }

    /* We're currently the only ones messing with this one, so just restore the
       cached value and reload the field. */
    if (   pDbgState->fModifiedProcCtls2
        && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
    {
        int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
        AssertRC(rc2);
        pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
    }

    /* If we've modified the exception bitmap, we restore it and trigger
       reloading and partial recalculation the next time around. */
    if (pDbgState->fModifiedXcptBitmap)
    {
        int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
        AssertRC(rc2);
        pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
    }

    /* Pass through the status from the single-stepping guest execution. */
    return rcStrict;
}
11245
11246
/**
 * Configures VM-exit controls for current DBGF and DTrace settings.
 *
 * This updates @a pDbgState and the VMCS execution control fields to reflect
 * the necessary VM-exits demanded by DBGF and DTrace.
 *
 * @param pVCpu           The cross context virtual CPU structure.
 * @param pVmxTransient   The VMX-transient structure. May update
 *                        fUpdatedTscOffsettingAndPreemptTimer.
 * @param pDbgState       The debug state.
 */
static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
{
#ifndef IN_NEM_DARWIN
    /*
     * Take down the dtrace serial number so we can spot changes.
     */
    pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
    ASMCompilerBarrier();
#endif

    /*
     * We'll rebuild most of the middle block of data members (holding the
     * current settings) as we go along here, so start by clearing it all.
     */
    pDbgState->bmXcptExtra      = 0;
    pDbgState->fCpe1Extra       = 0;
    pDbgState->fCpe1Unwanted    = 0;
    pDbgState->fCpe2Extra       = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
        pDbgState->bmExitsToCheck[i] = 0;

    /*
     * Software interrupts (INT XXh) - no idea how to trigger these...
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
        || VBOXVMM_INT_SOFTWARE_ENABLED())
    {
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
    }

    /*
     * INT3 breakpoints - triggered by #BP exceptions.
     */
    if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
        pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);

    /*
     * Exception bitmap and XCPT events+probes.
     */
    /* First pick up all exceptions for which a DBGF event is armed... */
    for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
        if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
            pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);

    /* ... then add those with an enabled dtrace probe. */
    if (VBOXVMM_XCPT_DE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
    if (VBOXVMM_XCPT_DB_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
    if (VBOXVMM_XCPT_BP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
    if (VBOXVMM_XCPT_OF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
    if (VBOXVMM_XCPT_BR_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
    if (VBOXVMM_XCPT_UD_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
    if (VBOXVMM_XCPT_NM_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
    if (VBOXVMM_XCPT_DF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
    if (VBOXVMM_XCPT_TS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
    if (VBOXVMM_XCPT_NP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
    if (VBOXVMM_XCPT_SS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
    if (VBOXVMM_XCPT_GP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
    if (VBOXVMM_XCPT_PF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
    if (VBOXVMM_XCPT_MF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
    if (VBOXVMM_XCPT_AC_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
    if (VBOXVMM_XCPT_XF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
    if (VBOXVMM_XCPT_VE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
    if (VBOXVMM_XCPT_SX_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);

    if (pDbgState->bmXcptExtra)
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);

    /*
     * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
     *
     * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
     *       So, when adding/changing/removing please don't forget to update it.
     *
     * Some of the macros are picking up local variables to save horizontal space,
     * (being able to see it in a table is the lesser evil here).
     */
#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
        (    DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
         ||  RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        {   AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)
#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        { \
            (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)
#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        { \
            (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)
#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
        { \
            (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
        } else do { } while (0)

    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE);  /* feature dependent, nothing to enable here */

    SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,       VMX_EXIT_CPUID);       /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID,       VMX_EXIT_CPUID);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,      VMX_EXIT_GETSEC);      /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC,      VMX_EXIT_GETSEC);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT,        VMX_EXIT_HLT,      VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT,        VMX_EXIT_HLT);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,        VMX_EXIT_INVD);        /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD,        VMX_EXIT_INVD);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG,      VMX_EXIT_INVLPG,   VMX_PROC_CTLS_INVLPG_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG,      VMX_EXIT_INVLPG);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC,       VMX_EXIT_RDPMC,    VMX_PROC_CTLS_RDPMC_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC,       VMX_EXIT_RDPMC);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC,       VMX_EXIT_RDTSC,    VMX_PROC_CTLS_RDTSC_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC,       VMX_EXIT_RDTSC);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,         VMX_EXIT_RSM);         /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM,         VMX_EXIT_RSM);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,    VMX_EXIT_VMCALL);      /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL,    VMX_EXIT_VMCALL);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,  VMX_EXIT_VMREAD);      /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD,  VMX_EXIT_VMREAD);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME);   /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,  VMX_EXIT_VMXOFF);      /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF,  VMX_EXIT_VMXOFF);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,   VMX_EXIT_VMXON);       /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON,   VMX_EXIT_VMXON);

    if (   IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
        || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    {
        /* CR0/CR4 must be up to date before we fiddle with the CR masks. */
        int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
                                         CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
        AssertRC(rc);

#if 0 /** @todo fix me */
        pDbgState->fClearCr0Mask = true;
        pDbgState->fClearCr4Mask = true;
#endif
        if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
            pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
        if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
            pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
        pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
        /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT.  It would
                 require clearing here and in the loop if we start using it. */
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
    }
    else
    {
        /* No CR intercepts wanted; undo any previously requested mask clearing
           and flag the guest CR values for re-export. */
        if (pDbgState->fClearCr0Mask)
        {
            pDbgState->fClearCr0Mask = false;
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
        }
        if (pDbgState->fClearCr4Mask)
        {
            pDbgState->fClearCr4Mask = false;
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
        }
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ,    VMX_EXIT_MOV_CRX);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE,   VMX_EXIT_MOV_CRX);

    if (   IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
        || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
    {
        /** @todo later, need to fix handler as it assumes this won't usually happen. */
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ,    VMX_EXIT_MOV_DRX);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE,   VMX_EXIT_MOV_DRX);

    SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR,       VMX_EXIT_RDMSR,    VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR,       VMX_EXIT_RDMSR);
    SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR,       VMX_EXIT_WRMSR,    VMX_PROC_CTLS_USE_MSR_BITMAPS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR,       VMX_EXIT_WRMSR);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT,       VMX_EXIT_MWAIT,    VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT,       VMX_EXIT_MWAIT);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR,     VMX_EXIT_MONITOR,  VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR,     VMX_EXIT_MONITOR);
#if 0 /** @todo too slow, fix handler. */
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE,       VMX_EXIT_PAUSE,    VMX_PROC_CTLS_PAUSE_EXIT);
#endif
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE,       VMX_EXIT_PAUSE);

    if (   IS_EITHER_ENABLED(pVM, INSTR_SGDT)
        || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
        || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
        || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
    {
        pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT,        VMX_EXIT_GDTR_IDTR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT,        VMX_EXIT_GDTR_IDTR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT,        VMX_EXIT_GDTR_IDTR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT,        VMX_EXIT_GDTR_IDTR_ACCESS);

    if (   IS_EITHER_ENABLED(pVM, INSTR_SLDT)
        || IS_EITHER_ENABLED(pVM, INSTR_STR)
        || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
        || IS_EITHER_ENABLED(pVM, INSTR_LTR))
    {
        pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
    }
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT,        VMX_EXIT_LDTR_TR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR,         VMX_EXIT_LDTR_TR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT,        VMX_EXIT_LDTR_TR_ACCESS);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR,         VMX_EXIT_LDTR_TR_ACCESS);

    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,  VMX_EXIT_INVEPT);      /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT,  VMX_EXIT_INVEPT);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP,      VMX_EXIT_RDTSCP,   VMX_PROC_CTLS_RDTSC_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP,      VMX_EXIT_RDTSCP);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID);     /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
    SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD,      VMX_EXIT_WBINVD,   VMX_PROC_CTLS2_WBINVD_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD,      VMX_EXIT_WBINVD);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,      VMX_EXIT_XSETBV);      /* unconditional */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV,      VMX_EXIT_XSETBV);
    SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND,      VMX_EXIT_RDRAND,   VMX_PROC_CTLS2_RDRAND_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND,      VMX_EXIT_RDRAND);
    SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID,  VMX_PROC_CTLS_INVLPG_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,  VMX_EXIT_VMFUNC);      /* unconditional for the current setup */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC,  VMX_EXIT_VMFUNC);
    SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED,      VMX_EXIT_RDSEED,   VMX_PROC_CTLS2_RDSEED_EXIT);
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED,      VMX_EXIT_RDSEED);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,      VMX_EXIT_XSAVES);      /* unconditional (enabled by host, guest cfg) */
    SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES,       VMX_EXIT_XSAVES);
    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,     VMX_EXIT_XRSTORS);     /* unconditional (enabled by host, guest cfg) */
    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS,     VMX_EXIT_XRSTORS);

#undef IS_EITHER_ENABLED
#undef SET_ONLY_XBM_IF_EITHER_EN
#undef SET_CPE1_XBM_IF_EITHER_EN
#undef SET_CPEU_XBM_IF_EITHER_EN
#undef SET_CPE2_XBM_IF_EITHER_EN

    /*
     * Sanitize the control stuff.
     */
    /* Keep only control bits the CPU actually supports (allowed-1/allowed-0). */
    pDbgState->fCpe2Extra       &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
    if (pDbgState->fCpe2Extra)
        pDbgState->fCpe1Extra   |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
    pDbgState->fCpe1Extra       &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
    pDbgState->fCpe1Unwanted    &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
#ifndef IN_NEM_DARWIN
    /* If the RDTSC-exiting desire changed, force recalc of TSC offsetting. */
    if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    {
        pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    }
#else
    if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    {
        pVCpu->nem.s.fDebugWantRdTscExit ^= true;
        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    }
#endif

    Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
          pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
          pDbgState->fClearCr0Mask ? " clr-cr0" : "",
          pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
}
11546
11547
11548/**
11549 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11550 * appropriate.
11551 *
11552 * The caller has checked the VM-exit against the
11553 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11554 * already, so we don't have to do that either.
11555 *
11556 * @returns Strict VBox status code (i.e. informational status codes too).
11557 * @param pVCpu The cross context virtual CPU structure.
11558 * @param pVmxTransient The VMX-transient structure.
11559 * @param uExitReason The VM-exit reason.
11560 *
11561 * @remarks The name of this function is displayed by dtrace, so keep it short
11562 * and to the point. No longer than 33 chars long, please.
11563 */
11564static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11565{
11566 /*
11567 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11568 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11569 *
11570 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11571 * does. Must add/change/remove both places. Same ordering, please.
11572 *
11573 * Added/removed events must also be reflected in the next section
11574 * where we dispatch dtrace events.
11575 */
11576 bool fDtrace1 = false;
11577 bool fDtrace2 = false;
11578 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11579 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11580 uint32_t uEventArg = 0;
11581#define SET_EXIT(a_EventSubName) \
11582 do { \
11583 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11584 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11585 } while (0)
11586#define SET_BOTH(a_EventSubName) \
11587 do { \
11588 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11589 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11590 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11591 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11592 } while (0)
11593 switch (uExitReason)
11594 {
11595 case VMX_EXIT_MTF:
11596 return vmxHCExitMtf(pVCpu, pVmxTransient);
11597
11598 case VMX_EXIT_XCPT_OR_NMI:
11599 {
11600 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11601 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11602 {
11603 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11604 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11605 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11606 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11607 {
11608 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11609 {
11610 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11611 uEventArg = pVmxTransient->uExitIntErrorCode;
11612 }
11613 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11614 switch (enmEvent1)
11615 {
11616 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11617 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11618 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11619 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11620 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11621 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11622 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11623 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11624 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11625 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11626 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11627 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11628 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11629 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11630 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11631 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11632 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11633 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11634 default: break;
11635 }
11636 }
11637 else
11638 AssertFailed();
11639 break;
11640
11641 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11642 uEventArg = idxVector;
11643 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11644 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11645 break;
11646 }
11647 break;
11648 }
11649
11650 case VMX_EXIT_TRIPLE_FAULT:
11651 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11652 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11653 break;
11654 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11655 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11656 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11657 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11658 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11659
11660 /* Instruction specific VM-exits: */
11661 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11662 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11663 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11664 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11665 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11666 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11667 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11668 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11669 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11670 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11671 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11672 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11673 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11674 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11675 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11676 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11677 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11678 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11679 case VMX_EXIT_MOV_CRX:
11680 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11681 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11682 SET_BOTH(CRX_READ);
11683 else
11684 SET_BOTH(CRX_WRITE);
11685 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11686 break;
11687 case VMX_EXIT_MOV_DRX:
11688 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11689 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11690 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11691 SET_BOTH(DRX_READ);
11692 else
11693 SET_BOTH(DRX_WRITE);
11694 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11695 break;
11696 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11697 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11698 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11699 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11700 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11701 case VMX_EXIT_GDTR_IDTR_ACCESS:
11702 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11703 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11704 {
11705 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11706 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11707 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11708 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11709 }
11710 break;
11711
11712 case VMX_EXIT_LDTR_TR_ACCESS:
11713 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11714 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11715 {
11716 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11717 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11718 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11719 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11720 }
11721 break;
11722
11723 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11724 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11725 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11726 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11727 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11728 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11729 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11730 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11731 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11732 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11733 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11734
11735 /* Events that aren't relevant at this point. */
11736 case VMX_EXIT_EXT_INT:
11737 case VMX_EXIT_INT_WINDOW:
11738 case VMX_EXIT_NMI_WINDOW:
11739 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11740 case VMX_EXIT_PREEMPT_TIMER:
11741 case VMX_EXIT_IO_INSTR:
11742 break;
11743
11744 /* Errors and unexpected events. */
11745 case VMX_EXIT_INIT_SIGNAL:
11746 case VMX_EXIT_SIPI:
11747 case VMX_EXIT_IO_SMI:
11748 case VMX_EXIT_SMI:
11749 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11750 case VMX_EXIT_ERR_MSR_LOAD:
11751 case VMX_EXIT_ERR_MACHINE_CHECK:
11752 case VMX_EXIT_PML_FULL:
11753 case VMX_EXIT_VIRTUALIZED_EOI:
11754 break;
11755
11756 default:
11757 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11758 break;
11759 }
11760#undef SET_BOTH
11761#undef SET_EXIT
11762
11763 /*
11764 * Dtrace tracepoints go first. We do them here at once so we don't
11765 * have to copy the guest state saving and stuff a few dozen times.
11766 * Down side is that we've got to repeat the switch, though this time
11767 * we use enmEvent since the probes are a subset of what DBGF does.
11768 */
11769 if (fDtrace1 || fDtrace2)
11770 {
11771 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11772 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11773 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11774 switch (enmEvent1)
11775 {
11776 /** @todo consider which extra parameters would be helpful for each probe. */
11777 case DBGFEVENT_END: break;
11778 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11779 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11780 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11781 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11782 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11783 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11784 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11785 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11786 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11787 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11788 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11789 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11790 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11791 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11792 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11793 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11794 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11795 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11796 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11797 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11798 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11799 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11800 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11801 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11802 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11803 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11804 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11805 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11806 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11807 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11808 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11809 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11810 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11811 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11812 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11813 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11814 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11815 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11816 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11817 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11818 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11819 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11820 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11821 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11822 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11823 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11824 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11825 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11826 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11827 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11828 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11829 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11830 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11831 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11832 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11833 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11834 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11835 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11836 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11837 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11838 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11839 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11840 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11841 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11842 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11843 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11844 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11845 }
11846 switch (enmEvent2)
11847 {
11848 /** @todo consider which extra parameters would be helpful for each probe. */
11849 case DBGFEVENT_END: break;
11850 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11851 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11852 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11853 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11854 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11855 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11856 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11857 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11858 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11859 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11860 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11861 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11862 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11863 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11864 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11865 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11866 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11867 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11868 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11869 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11870 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11871 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11872 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11873 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11874 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11875 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11876 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11877 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11878 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11879 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11880 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11881 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11882 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11883 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11884 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11885 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11886 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11887 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11888 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11889 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11890 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11891 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11892 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11893 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11894 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11895 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11896 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11897 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11898 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11899 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11900 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11901 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11902 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11903 }
11904 }
11905
 11906 /*
 11907 * Fire off the DBGF event, if enabled (our check here is just a quick one,
 11908 * the DBGF call will do a full check).
 11909 *
 11910 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
 11911 * Note! If we have two events, we prioritize the first, i.e. the instruction
 11912 * one, in order to avoid event nesting.
 11913 */
11914 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11915 if ( enmEvent1 != DBGFEVENT_END
11916 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11917 {
11918 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11919 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11920 if (rcStrict != VINF_SUCCESS)
11921 return rcStrict;
11922 }
11923 else if ( enmEvent2 != DBGFEVENT_END
11924 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11925 {
11926 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11927 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11928 if (rcStrict != VINF_SUCCESS)
11929 return rcStrict;
11930 }
11931
11932 return VINF_SUCCESS;
11933}
11934
11935
/**
 * Single-stepping VM-exit filtering.
 *
 * This is preprocessing the VM-exits and deciding whether we've gotten far
 * enough to return VINF_EM_DBG_STEPPED already.  If not, normal VM-exit
 * handling is performed.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   pDbgState       The debug state (holds the CS:RIP where stepping
 *                          started and the bitmap of exits to check).
 */
DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
{
    /*
     * Expensive (saves context) generic dtrace VM-exit probe.
     * Only taken when the R0 HMVMX VM-exit probe is actually enabled.
     */
    uint32_t const uExitReason = pVmxTransient->uExitReason;
    if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
    { /* more likely */ }
    else
    {
        /* The probe gets the full guest state, so import everything first. */
        vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
        int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
        AssertRC(rc);
        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    }

#ifndef IN_NEM_DARWIN
    /*
     * Check for host NMI, just to get that out of the way.
     * (Not applicable to the NEM/darwin backend, hence the #ifndef.)
     */
    if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
    { /* normally likely */ }
    else
    {
        vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
        uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
        if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
            return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
    }
#endif

    /*
     * Check for single stepping event if we're stepping.
     */
    if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    {
        switch (uExitReason)
        {
            case VMX_EXIT_MTF:
                /* Monitor trap flag exit - dedicated handler. */
                return vmxHCExitMtf(pVCpu, pVmxTransient);

            /* Various events: */
            case VMX_EXIT_XCPT_OR_NMI:
            case VMX_EXIT_EXT_INT:
            case VMX_EXIT_TRIPLE_FAULT:
            case VMX_EXIT_INT_WINDOW:
            case VMX_EXIT_NMI_WINDOW:
            case VMX_EXIT_TASK_SWITCH:
            case VMX_EXIT_TPR_BELOW_THRESHOLD:
            case VMX_EXIT_APIC_ACCESS:
            case VMX_EXIT_EPT_VIOLATION:
            case VMX_EXIT_EPT_MISCONFIG:
            case VMX_EXIT_PREEMPT_TIMER:

            /* Instruction specific VM-exits: */
            case VMX_EXIT_CPUID:
            case VMX_EXIT_GETSEC:
            case VMX_EXIT_HLT:
            case VMX_EXIT_INVD:
            case VMX_EXIT_INVLPG:
            case VMX_EXIT_RDPMC:
            case VMX_EXIT_RDTSC:
            case VMX_EXIT_RSM:
            case VMX_EXIT_VMCALL:
            case VMX_EXIT_VMCLEAR:
            case VMX_EXIT_VMLAUNCH:
            case VMX_EXIT_VMPTRLD:
            case VMX_EXIT_VMPTRST:
            case VMX_EXIT_VMREAD:
            case VMX_EXIT_VMRESUME:
            case VMX_EXIT_VMWRITE:
            case VMX_EXIT_VMXOFF:
            case VMX_EXIT_VMXON:
            case VMX_EXIT_MOV_CRX:
            case VMX_EXIT_MOV_DRX:
            case VMX_EXIT_IO_INSTR:
            case VMX_EXIT_RDMSR:
            case VMX_EXIT_WRMSR:
            case VMX_EXIT_MWAIT:
            case VMX_EXIT_MONITOR:
            case VMX_EXIT_PAUSE:
            case VMX_EXIT_GDTR_IDTR_ACCESS:
            case VMX_EXIT_LDTR_TR_ACCESS:
            case VMX_EXIT_INVEPT:
            case VMX_EXIT_RDTSCP:
            case VMX_EXIT_INVVPID:
            case VMX_EXIT_WBINVD:
            case VMX_EXIT_XSETBV:
            case VMX_EXIT_RDRAND:
            case VMX_EXIT_INVPCID:
            case VMX_EXIT_VMFUNC:
            case VMX_EXIT_RDSEED:
            case VMX_EXIT_XSAVES:
            case VMX_EXIT_XRSTORS:
            {
                /* We've stepped past the starting instruction once CS:RIP no
                   longer matches where single-stepping began. */
                int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
                AssertRCReturn(rc, rc);
                if (   pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
                    || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
                    return VINF_EM_DBG_STEPPED;
                break;
            }

            /* Errors and unexpected events: */
            case VMX_EXIT_INIT_SIGNAL:
            case VMX_EXIT_SIPI:
            case VMX_EXIT_IO_SMI:
            case VMX_EXIT_SMI:
            case VMX_EXIT_ERR_INVALID_GUEST_STATE:
            case VMX_EXIT_ERR_MSR_LOAD:
            case VMX_EXIT_ERR_MACHINE_CHECK:
            case VMX_EXIT_PML_FULL:
            case VMX_EXIT_VIRTUALIZED_EOI:
            case VMX_EXIT_APIC_WRITE:  /* Some talk about this being fault like, so I guess we must process it? */
                break;

            default:
                AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
                break;
        }
    }

    /*
     * Check for debugger event breakpoints and dtrace probes.
     * bmExitsToCheck is a bitmap indexed by exit reason; the range check guards
     * against reasons beyond the bitmap's capacity (32 bits per element).
     */
    if (   uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
        && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
    {
        VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    /*
     * Normal processing.
     */
#ifdef HMVMX_USE_FUNCTION_TABLE
    return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
#else
    return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
#endif
}
12090
12091/** @} */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette