VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 100764

Last change on this file since 100764 was 100244, checked in by vboxsync, 18 months ago

VMM: Nested VMX: bugref:10318 Guru meditate if the nested-EPT trap handler does not indicate an EPT misconfig. when we expect it to.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 526.4 KB
1/* $Id: VMXAllTemplate.cpp.h 100244 2023-06-22 03:59:01Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
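/*
 * Illustrative usage (a minimal sketch, not taken verbatim from this file): a
 * VM-exit handler that consumes the exit qualification would typically assert
 * that the field has already been read into the transient structure:
 *
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
 *     uint64_t const uExitQual = pVmxTransient->uExitQual;
 */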
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
95 * due to bugs in Intel CPUs.
96 * - \#PF need not be intercepted even in real-mode if we have nested paging
97 * support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
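/*
 * Illustrative usage (a minimal sketch): code that relies on CR0 and RFLAGS
 * having been imported from the VMCS could guard itself with:
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 */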
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC (non-strict return code) suffix is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
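/*
 * Illustrative sketch of how the table is consumed when HMVMX_USE_FUNCTION_TABLE
 * is defined (simplified; uExitReason is assumed to be the basic exit reason
 * already extracted from VMX_VMCS32_RO_EXIT_REASON):
 *
 *     Assert(uExitReason <= VMX_EXIT_MAX);
 *     return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 */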
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executing VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
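/*
 * Illustrative usage (a minimal sketch; uInstrError stands for the value read
 * from VMX_VMCS32_RO_VM_INSTR_ERROR): clamp the index before looking up the
 * descriptive string:
 *
 *     uint32_t const idxError = RT_MIN(uInstrError, HMVMX_INSTR_ERROR_MAX);
 *     Log(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[idxError]));
 */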
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
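/*
 * Illustrative sketch (assuming VMX_VMCS_WRITE_NW takes the same
 * (pVCpu, field, value) form as VMX_VMCS_WRITE_32 used elsewhere in this file):
 * the returned mask is what would typically be committed to the CR0 guest/host
 * mask field when setting up CR0 intercepts:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int const rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */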
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because its behavior
769 * depends on CR3, which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
839
840
841/**
842 * Removes one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Removes an exception from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
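/*
 * Illustrative usage (a minimal sketch): a temporarily added intercept, say for
 * \#GP, could later be dropped again with:
 *
 *     int const rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 */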
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
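/*
 * Illustrative usage (a minimal sketch): before executing a nested-guest using
 * hardware-assisted VMX, the caller would switch to the nested-guest VMCS:
 *
 *     int const rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
 *     AssertRCReturn(rc, rc);
 */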
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call this directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequence without any conditionals in
1234 * non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
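/*
 * Illustrative usage (a minimal sketch): a VM-exit handler needing both the exit
 * qualification and the instruction length would instantiate the template with
 * the corresponding HMVMX_READ_XXX bits:
 *
 *     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
 *                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
 */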
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
1436
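/*
 * A minimal usage sketch (assumed caller, not taken from the code above or below): a
 * strict build wishing to validate the regular (non nested-guest) control cache before
 * VM-entry could do something along these lines:
 *
 *     int const rcCache = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, false);
 *     if (RT_FAILURE(rcCache))
 *         return rcCache;
 *
 * On failure, VCPU_2_VMXSTATE(pVCpu).u32HMError identifies the mismatching field (VMX_VCI_XXX).
 */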
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
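/*
 * A small worked illustration of the allowed0/allowed1 convention (made-up values):
 * with allowed0 = 0x00000004 and allowed1 = 0x0000000d, bit 2 must be 1, bits 0 and 3
 * may be either 0 or 1, and every other bit must be 0. The "(fVal & fZap) == fVal"
 * check further down rejects any fVal that sets a bit outside allowed1.
 */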
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478 * required to get the nested-guest working with hardware-assisted VMX execution.
1479 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set the "save debug controls" control since the converse
1548 * "load debug controls" control is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
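/*
 * Worked example (illustrative numbers): with guest TPR = 0x40 (priority class 4) and a
 * pending vector 0x29 (priority class 2), the pending class is <= the TPR class, so the
 * threshold becomes 2 and the guest takes a TPR-below-threshold VM-exit once it lowers
 * its TPR below 0x20, letting us deliver the interrupt. A pending vector 0x51 (class 5)
 * is not masked by a TPR of 0x40 and the threshold stays 0.
 */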
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related internal eflags
1700 * inhibition state.
1701 *
1702 * @returns Guest's interruptibility-state.
1703 * @param pVCpu The cross context virtual CPU structure.
1704 *
1705 * @remarks No-long-jump zone!!!
1706 */
1707static uint32_t vmxHCGetGuestIntrStateWithUpdate(PVMCPUCC pVCpu)
1708{
1709 uint32_t fIntrState;
1710
1711 /*
1712 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1713 */
1714 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1715 fIntrState = 0;
1716 else
1717 {
1718 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1719 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1720
1721 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1722 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1723 else
1724 {
1725 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1726
1727 /* Block-by-STI must not be set when interrupts are disabled. */
1728 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1729 }
1730 }
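/*
 * Illustrative example of the two cases above: after "sti" (with RFLAGS.IF previously
 * clear) the very next instruction executes with block-by-STI set, whereas after
 * "mov ss, ax" or "pop ss" it executes with block-by-MOV-SS set; in both cases delivery
 * of maskable external interrupts is inhibited for that single instruction.
 */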
1731
1732 /*
1733 * Check if we should inhibit NMI delivery.
1734 */
1735 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1736 { /* likely */ }
1737 else
1738 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1739
1740 /*
1741 * Validate.
1742 */
1743 /* We don't support block-by-SMI yet.*/
1744 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1745
1746 return fIntrState;
1747}
1748
1749
1750/**
1751 * Exports the exception intercepts required for guest execution in the VMCS.
1752 *
1753 * @param pVCpu The cross context virtual CPU structure.
1754 * @param pVmxTransient The VMX-transient structure.
1755 *
1756 * @remarks No-long-jump zone!!!
1757 */
1758static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1759{
1760 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1761 {
1762 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1763 if ( !pVmxTransient->fIsNestedGuest
1764 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1765 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1766 else
1767 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1768
1769 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1770 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1771 }
1772}
1773
1774
1775/**
1776 * Exports the guest's RIP into the guest-state area in the VMCS.
1777 *
1778 * @param pVCpu The cross context virtual CPU structure.
1779 *
1780 * @remarks No-long-jump zone!!!
1781 */
1782static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1783{
1784 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1785 {
1786 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1787
1788 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1789 AssertRC(rc);
1790
1791 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1792 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1793 }
1794}
1795
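/*
 * The pattern above (test an HM_CHANGED_XXX bit, VMWRITE, clear the bit atomically) is
 * the export protocol used throughout this file. A hypothetical caller that modified the
 * guest RIP would, roughly, do:
 *
 *     ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
 *     ...
 *     vmxHCExportGuestRip(pVCpu);   <- writes VMX_VMCS_GUEST_RIP only if the bit was set
 */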
1796
1797/**
1798 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1799 *
1800 * @param pVCpu The cross context virtual CPU structure.
1801 * @param pVmxTransient The VMX-transient structure.
1802 *
1803 * @remarks No-long-jump zone!!!
1804 */
1805static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1806{
1807 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1808 {
1809 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1810
1811 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1812 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so there is
1813 no need to assert this; the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1814 Use 32-bit VMWRITE. */
1815 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1816 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1817 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1818
1819#ifndef IN_NEM_DARWIN
1820 /*
1821 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1822 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1823 * can run the real-mode guest code under Virtual 8086 mode.
1824 */
1825 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1826 if (pVmcsInfo->RealMode.fRealOnV86Active)
1827 {
1828 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1829 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1830 Assert(!pVmxTransient->fIsNestedGuest);
1831 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1832 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1833 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1834 }
1835#else
1836 RT_NOREF(pVmxTransient);
1837#endif
1838
1839 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1840 AssertRC(rc);
1841
1842 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1843 Log4Func(("eflags=%#RX32\n", fEFlags));
1844 }
1845}
1846
1847
1848#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1849/**
1850 * Copies the nested-guest VMCS to the shadow VMCS.
1851 *
1852 * @returns VBox status code.
1853 * @param pVCpu The cross context virtual CPU structure.
1854 * @param pVmcsInfo The VMCS info. object.
1855 *
1856 * @remarks No-long-jump zone!!!
1857 */
1858static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1859{
1860 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1861 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1862
1863 /*
1864 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1865 * current VMCS, as we may try saving guest lazy MSRs.
1866 *
1867 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1868 * calling the import VMCS code which is currently performing the guest MSR reads
1869 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1870 * and the rest of the VMX leave session machinery.
1871 */
1872 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1873
1874 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1875 if (RT_SUCCESS(rc))
1876 {
1877 /*
1878 * Copy all guest read/write VMCS fields.
1879 *
1880 * We don't check for VMWRITE failures here for performance reasons and
1881 * because they are not expected to fail, barring irrecoverable conditions
1882 * like hardware errors.
1883 */
1884 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1885 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1886 {
1887 uint64_t u64Val;
1888 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1889 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1890 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1891 }
1892
1893 /*
1894 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1895 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1896 */
1897 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1898 {
1899 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1900 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1901 {
1902 uint64_t u64Val;
1903 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1904 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1905 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1906 }
1907 }
1908
1909 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1910 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1911 }
1912
1913 ASMSetFlags(fEFlags);
1914 return rc;
1915}
1916
1917
1918/**
1919 * Copies the shadow VMCS to the nested-guest VMCS.
1920 *
1921 * @returns VBox status code.
1922 * @param pVCpu The cross context virtual CPU structure.
1923 * @param pVmcsInfo The VMCS info. object.
1924 *
1925 * @remarks Called with interrupts disabled.
1926 */
1927static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1928{
1929 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1930 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1931 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1932
1933 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1934 if (RT_SUCCESS(rc))
1935 {
1936 /*
1937 * Copy guest read/write fields from the shadow VMCS.
1938 * Guest read-only fields cannot be modified, so no need to copy them.
1939 *
1940 * We don't check for VMREAD failures here for performance reasons and
1941 * because they are not expected to fail, barring irrecoverable conditions
1942 * like hardware errors.
1943 */
1944 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1945 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1946 {
1947 uint64_t u64Val;
1948 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1949 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1950 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1951 }
1952
1953 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1954 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1955 }
1956 return rc;
1957}
1958
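/*
 * Rough sketch of how the two copy routines above pair up (see the callers for the
 * authoritative sequencing): before running the nested-guest, the software VMCS is
 * propagated into the shadow VMCS (vmxHCCopyNstGstToShadowVmcs) so that unintercepted
 * VMREAD/VMWRITE instructions of the nested hypervisor operate on it; afterwards any
 * VMWRITEs are folded back into the software VMCS (vmxHCCopyShadowToNstGstVmcs).
 */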
1959
1960/**
1961 * Enables VMCS shadowing for the given VMCS info. object.
1962 *
1963 * @param pVCpu The cross context virtual CPU structure.
1964 * @param pVmcsInfo The VMCS info. object.
1965 *
1966 * @remarks No-long-jump zone!!!
1967 */
1968static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1969{
1970 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1971 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1972 {
1973 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1974 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1975 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1976 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1977 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1978 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1979 Log4Func(("Enabled\n"));
1980 }
1981}
1982
1983
1984/**
1985 * Disables VMCS shadowing for the given VMCS info. object.
1986 *
1987 * @param pVCpu The cross context virtual CPU structure.
1988 * @param pVmcsInfo The VMCS info. object.
1989 *
1990 * @remarks No-long-jump zone!!!
1991 */
1992static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1993{
1994 /*
1995 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1996 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1997 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1998 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1999 *
2000 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2001 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2002 */
2003 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2004 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2005 {
2006 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2007 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2008 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2009 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2010 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2011 Log4Func(("Disabled\n"));
2012 }
2013}
2014#endif
2015
2016
2017/**
2018 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2019 *
2020 * The guest FPU state is always pre-loaded, hence we don't need to bother with
2021 * sharing FPU-related CR0 bits between the guest and host.
2022 *
2023 * @returns VBox status code.
2024 * @param pVCpu The cross context virtual CPU structure.
2025 * @param pVmxTransient The VMX-transient structure.
2026 *
2027 * @remarks No-long-jump zone!!!
2028 */
2029static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2030{
2031 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2032 {
2033 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2034 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2035
2036 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2037 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2038 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2039 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2040 else
2041 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
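/*
 * For illustration (the real values come from IA32_VMX_CR0_FIXED0/FIXED1 on the CPU):
 * Cr0Fixed0 is commonly 0x80000021 (PG, NE, PE) and Cr0Fixed1 0xffffffff. With
 * unrestricted guest execution PE and PG were just stripped from the must-be-one set,
 * leaving effectively only CR0.NE to be forced on further down.
 */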
2042
2043 if (!pVmxTransient->fIsNestedGuest)
2044 {
2045 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2046 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2047 uint64_t const u64ShadowCr0 = u64GuestCr0;
2048 Assert(!RT_HI_U32(u64GuestCr0));
2049
2050 /*
2051 * Setup VT-x's view of the guest CR0.
2052 */
2053 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2054 if (VM_IS_VMX_NESTED_PAGING(pVM))
2055 {
2056#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2057 if (CPUMIsGuestPagingEnabled(pVCpu))
2058 {
2059 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2060 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2061 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2062 }
2063 else
2064 {
2065 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2066 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2067 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2068 }
2069
2070 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2071 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2072 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2073#endif
2074 }
2075 else
2076 {
2077 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2078 u64GuestCr0 |= X86_CR0_WP;
2079 }
2080
2081 /*
2082 * Guest FPU bits.
2083 *
2084 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2085 * using CR0.TS.
2086 *
2087 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
2088 * set on the first CPUs to support VT-x, and there is no mention of it with regards to UX (unrestricted guest) in the VM-entry checks.
2089 */
2090 u64GuestCr0 |= X86_CR0_NE;
2091
2092 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2093 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2094
2095 /*
2096 * Update exception intercepts.
2097 */
2098 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2099#ifndef IN_NEM_DARWIN
2100 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2101 {
2102 Assert(PDMVmmDevHeapIsEnabled(pVM));
2103 Assert(pVM->hm.s.vmx.pRealModeTSS);
2104 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2105 }
2106 else
2107#endif
2108 {
2109 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2110 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2111 if (fInterceptMF)
2112 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2113 }
2114
2115 /* Additional intercepts for debugging, define these yourself explicitly. */
2116#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2117 uXcptBitmap |= 0
2118 | RT_BIT(X86_XCPT_BP)
2119 | RT_BIT(X86_XCPT_DE)
2120 | RT_BIT(X86_XCPT_NM)
2121 | RT_BIT(X86_XCPT_TS)
2122 | RT_BIT(X86_XCPT_UD)
2123 | RT_BIT(X86_XCPT_NP)
2124 | RT_BIT(X86_XCPT_SS)
2125 | RT_BIT(X86_XCPT_GP)
2126 | RT_BIT(X86_XCPT_PF)
2127 | RT_BIT(X86_XCPT_MF)
2128 ;
2129#elif defined(HMVMX_ALWAYS_TRAP_PF)
2130 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2131#endif
2132 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2133 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2134 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2135 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2136 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2137
2138 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2139 u64GuestCr0 |= fSetCr0;
2140 u64GuestCr0 &= fZapCr0;
2141 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2142
2143 Assert(!RT_HI_U32(u64GuestCr0));
2144 Assert(u64GuestCr0 & X86_CR0_NE);
2145
2146 /* Commit the CR0 and related fields to the guest VMCS. */
2147 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2148 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2149 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2150 {
2151 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2152 AssertRC(rc);
2153 }
2154 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2155 {
2156 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2157 AssertRC(rc);
2158 }
2159
2160 /* Update our caches. */
2161 pVmcsInfo->u32ProcCtls = uProcCtls;
2162 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2163
2164 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2165 }
2166 else
2167 {
2168 /*
2169 * With nested-guests, we may have extended the guest/host mask here since we
2170 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2171 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2172 * originally supplied. We must copy those bits from the nested-guest CR0 into
2173 * the nested-guest CR0 read-shadow.
2174 */
2175 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2176 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2177 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2178
2179 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2180 u64GuestCr0 |= fSetCr0;
2181 u64GuestCr0 &= fZapCr0;
2182 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2183
2184 Assert(!RT_HI_U32(u64GuestCr0));
2185 Assert(u64GuestCr0 & X86_CR0_NE);
2186
2187 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2188 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2189 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2190
2191 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2192 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2193 }
2194
2195 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2196 }
2197
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Exports the guest control registers (CR3, CR4) into the guest-state area
2204 * in the VMCS.
2205 *
2206 * @returns VBox strict status code.
2207 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2208 * without unrestricted guest access and the VMMDev is not presently
2209 * mapped (e.g. EFI32).
2210 *
2211 * @param pVCpu The cross context virtual CPU structure.
2212 * @param pVmxTransient The VMX-transient structure.
2213 *
2214 * @remarks No-long-jump zone!!!
2215 */
2216static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2217{
2218 int rc = VINF_SUCCESS;
2219 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2220
2221 /*
2222 * Guest CR2.
2223 * It's always loaded in the assembler code. Nothing to do here.
2224 */
2225
2226 /*
2227 * Guest CR3.
2228 */
2229 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2230 {
2231 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2232
2233 if (VM_IS_VMX_NESTED_PAGING(pVM))
2234 {
2235#ifndef IN_NEM_DARWIN
2236 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2237 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2238
2239 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2240 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2241 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2242 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2243
2244 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2245 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2246 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
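/*
 * Worked example (illustrative address): an EPT PML4 table at host-physical
 * 0x0000000012345000 yields an EPTP of 0x000000001234501e here, i.e. memory type
 * WB (6) in bits 2:0 and a page-walk length of 4 encoded as 3 in bits 5:3.
 */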
2247
2248 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2249 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2250 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2251 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2252 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2253 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2254 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2255
2256 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2257 AssertRC(rc);
2258#endif
2259
2260 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2261 uint64_t u64GuestCr3 = pCtx->cr3;
2262 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2263 || CPUMIsGuestPagingEnabledEx(pCtx))
2264 {
2265 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2266 if (CPUMIsGuestInPAEModeEx(pCtx))
2267 {
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2271 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2272 }
2273
2274 /*
2275 * The guest's view of its CR3 is unblemished with nested paging when the
2276 * guest is using paging or we have unrestricted guest execution to handle
2277 * the guest when it's not using paging.
2278 */
2279 }
2280#ifndef IN_NEM_DARWIN
2281 else
2282 {
2283 /*
2284 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2285 * thinks it accesses physical memory directly, we use our identity-mapped
2286 * page table to map guest-linear to guest-physical addresses. EPT takes care
2287 * of translating it to host-physical addresses.
2288 */
2289 RTGCPHYS GCPhys;
2290 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2291
2292 /* We obtain it here every time as the guest could have relocated this PCI region. */
2293 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2294 if (RT_SUCCESS(rc))
2295 { /* likely */ }
2296 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2297 {
2298 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2299 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2300 }
2301 else
2302 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2303
2304 u64GuestCr3 = GCPhys;
2305 }
2306#endif
2307
2308 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2309 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2310 AssertRC(rc);
2311 }
2312 else
2313 {
2314 Assert(!pVmxTransient->fIsNestedGuest);
2315 /* Non-nested paging case, just use the hypervisor's CR3. */
2316 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2317
2318 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2319 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2320 AssertRC(rc);
2321 }
2322
2323 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2324 }
2325
2326 /*
2327 * Guest CR4.
2328 * ASSUMES this is done every time we get in from ring-3! (XCR0)
2329 */
2330 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2331 {
2332 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2333 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2334
2335 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2336 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2337
2338 /*
2339 * With nested-guests, we may have extended the guest/host mask here (since we
2340 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2341 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2342 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2343 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2344 */
2345 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2346 uint64_t u64GuestCr4 = pCtx->cr4;
2347 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2348 ? pCtx->cr4
2349 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2350 Assert(!RT_HI_U32(u64GuestCr4));
2351
2352#ifndef IN_NEM_DARWIN
2353 /*
2354 * Setup VT-x's view of the guest CR4.
2355 *
2356 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2357 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2358 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2359 *
2360 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2361 */
2362 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2363 {
2364 Assert(pVM->hm.s.vmx.pRealModeTSS);
2365 Assert(PDMVmmDevHeapIsEnabled(pVM));
2366 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2367 }
2368#endif
2369
2370 if (VM_IS_VMX_NESTED_PAGING(pVM))
2371 {
2372 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2373 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2374 {
2375 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2376 u64GuestCr4 |= X86_CR4_PSE;
2377 /* Our identity mapping is a 32-bit page directory. */
2378 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2379 }
2380 /* else use guest CR4.*/
2381 }
2382 else
2383 {
2384 Assert(!pVmxTransient->fIsNestedGuest);
2385
2386 /*
2387 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2388 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2389 */
2390 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2391 {
2392 case PGMMODE_REAL: /* Real-mode. */
2393 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2394 case PGMMODE_32_BIT: /* 32-bit paging. */
2395 {
2396 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2397 break;
2398 }
2399
2400 case PGMMODE_PAE: /* PAE paging. */
2401 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2402 {
2403 u64GuestCr4 |= X86_CR4_PAE;
2404 break;
2405 }
2406
2407 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2408 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2409 {
2410#ifdef VBOX_WITH_64_BITS_GUESTS
2411 /* For our assumption in vmxHCShouldSwapEferMsr. */
2412 Assert(u64GuestCr4 & X86_CR4_PAE);
2413 break;
2414#endif
2415 }
2416 default:
2417 AssertFailed();
2418 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2419 }
2420 }
2421
2422 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2423 u64GuestCr4 |= fSetCr4;
2424 u64GuestCr4 &= fZapCr4;
2425
2426 Assert(!RT_HI_U32(u64GuestCr4));
2427 Assert(u64GuestCr4 & X86_CR4_VMXE);
2428
2429 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2431 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2432
2433#ifndef IN_NEM_DARWIN
2434 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2435 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2436 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2437 {
2438 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2439 hmR0VmxUpdateStartVmFunction(pVCpu);
2440 }
2441#endif
2442
2443 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2444
2445 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2446 }
2447 return rc;
2448}
2449
2450
2451#ifdef VBOX_STRICT
2452/**
2453 * Strict function to validate segment registers.
2454 *
2455 * @param pVCpu The cross context virtual CPU structure.
2456 * @param pVmcsInfo The VMCS info. object.
2457 *
2458 * @remarks Will import guest CR0 on strict builds during validation of
2459 * segments.
2460 */
2461static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2462{
2463 /*
2464 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2465 *
2466 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2467 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2468 * unusable bit and doesn't change the guest-context value.
2469 */
2470 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2471 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2472 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2473 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2474 && ( !CPUMIsGuestInRealModeEx(pCtx)
2475 && !CPUMIsGuestInV86ModeEx(pCtx)))
2476 {
2477 /* Protected mode checks */
2478 /* CS */
2479 Assert(pCtx->cs.Attr.n.u1Present);
2480 Assert(!(pCtx->cs.Attr.u & 0xf00));
2481 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2482 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2483 || !(pCtx->cs.Attr.n.u1Granularity));
2484 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2485 || (pCtx->cs.Attr.n.u1Granularity));
2486 /* CS cannot be loaded with NULL in protected mode. */
2487 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2488 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2489 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2490 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2491 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2492 else
2493 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2494 /* SS */
2495 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2496 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2497 if ( !(pCtx->cr0 & X86_CR0_PE)
2498 || pCtx->cs.Attr.n.u4Type == 3)
2499 {
2500 Assert(!pCtx->ss.Attr.n.u2Dpl);
2501 }
2502 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2503 {
2504 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2505 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2506 Assert(pCtx->ss.Attr.n.u1Present);
2507 Assert(!(pCtx->ss.Attr.u & 0xf00));
2508 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2509 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2510 || !(pCtx->ss.Attr.n.u1Granularity));
2511 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2512 || (pCtx->ss.Attr.n.u1Granularity));
2513 }
2514 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2515 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2516 {
2517 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2518 Assert(pCtx->ds.Attr.n.u1Present);
2519 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2520 Assert(!(pCtx->ds.Attr.u & 0xf00));
2521 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2522 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2523 || !(pCtx->ds.Attr.n.u1Granularity));
2524 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2525 || (pCtx->ds.Attr.n.u1Granularity));
2526 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2527 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2528 }
2529 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2530 {
2531 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2532 Assert(pCtx->es.Attr.n.u1Present);
2533 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2534 Assert(!(pCtx->es.Attr.u & 0xf00));
2535 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2536 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2537 || !(pCtx->es.Attr.n.u1Granularity));
2538 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2539 || (pCtx->es.Attr.n.u1Granularity));
2540 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2541 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2542 }
2543 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2544 {
2545 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2546 Assert(pCtx->fs.Attr.n.u1Present);
2547 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2548 Assert(!(pCtx->fs.Attr.u & 0xf00));
2549 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2550 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2551 || !(pCtx->fs.Attr.n.u1Granularity));
2552 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2553 || (pCtx->fs.Attr.n.u1Granularity));
2554 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2555 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2556 }
2557 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2558 {
2559 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2560 Assert(pCtx->gs.Attr.n.u1Present);
2561 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2562 Assert(!(pCtx->gs.Attr.u & 0xf00));
2563 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2564 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2565 || !(pCtx->gs.Attr.n.u1Granularity));
2566 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2567 || (pCtx->gs.Attr.n.u1Granularity));
2568 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2569 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2570 }
2571 /* 64-bit capable CPUs. */
2572 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2573 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2574 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2575 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2576 }
2577 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2578 || ( CPUMIsGuestInRealModeEx(pCtx)
2579 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2580 {
2581 /* Real and v86 mode checks. */
2582 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2583 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2584#ifndef IN_NEM_DARWIN
2585 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2586 {
2587 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2588 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2589 }
2590 else
2591#endif
2592 {
2593 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2594 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2595 }
2596
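/*
 * For instance (illustrative selector value): in real or v86 mode a CS selector of
 * 0x1234 must have a base of 0x12340 (selector << 4), a limit of 0xffff and access
 * rights of 0xf3 as fed to VT-x, which is exactly what the checks below assert.
 */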
2597 /* CS */
2598 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2599 Assert(pCtx->cs.u32Limit == 0xffff);
2600 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2601 /* SS */
2602 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2603 Assert(pCtx->ss.u32Limit == 0xffff);
2604 Assert(u32SSAttr == 0xf3);
2605 /* DS */
2606 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2607 Assert(pCtx->ds.u32Limit == 0xffff);
2608 Assert(u32DSAttr == 0xf3);
2609 /* ES */
2610 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2611 Assert(pCtx->es.u32Limit == 0xffff);
2612 Assert(u32ESAttr == 0xf3);
2613 /* FS */
2614 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2615 Assert(pCtx->fs.u32Limit == 0xffff);
2616 Assert(u32FSAttr == 0xf3);
2617 /* GS */
2618 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2619 Assert(pCtx->gs.u32Limit == 0xffff);
2620 Assert(u32GSAttr == 0xf3);
2621 /* 64-bit capable CPUs. */
2622 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2623 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2624 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2625 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2626 }
2627}
2628#endif /* VBOX_STRICT */
2629
2630
2631/**
2632 * Exports a guest segment register into the guest-state area in the VMCS.
2633 *
2634 * @returns VBox status code.
2635 * @param pVCpu The cross context virtual CPU structure.
2636 * @param pVmcsInfo The VMCS info. object.
2637 * @param iSegReg The segment register number (X86_SREG_XXX).
2638 * @param pSelReg Pointer to the segment selector.
2639 *
2640 * @remarks No-long-jump zone!!!
2641 */
2642static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2643{
2644 Assert(iSegReg < X86_SREG_COUNT);
2645
2646 uint32_t u32Access = pSelReg->Attr.u;
2647#ifndef IN_NEM_DARWIN
2648 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2649#endif
2650 {
2651 /*
2652 * The way to differentiate whether this is really a null selector or just a selector
2653 * loaded with 0 in real-mode is by using the segment attributes. A selector loaded in
2654 * real-mode with the value 0 is valid and usable in protected-mode, so we should -not-
2655 * mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors
2656 * loaded in protected-mode have their attributes set to 0.
2657 */
2658 if (u32Access)
2659 { }
2660 else
2661 u32Access = X86DESCATTR_UNUSABLE;
2662 }
2663#ifndef IN_NEM_DARWIN
2664 else
2665 {
2666 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2667 u32Access = 0xf3;
2668 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2669 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2670 RT_NOREF_PV(pVCpu);
2671 }
2672#else
2673 RT_NOREF(pVmcsInfo);
2674#endif
2675
2676 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2677 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2678 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2679
2680 /*
2681 * Commit it to the VMCS.
2682 */
2683 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2686 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2687 return VINF_SUCCESS;
2688}
2689
2690
2691/**
2692 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2693 * area in the VMCS.
2694 *
2695 * @returns VBox status code.
2696 * @param pVCpu The cross context virtual CPU structure.
2697 * @param pVmxTransient The VMX-transient structure.
2698 *
2699 * @remarks Will import guest CR0 on strict builds during validation of
2700 * segments.
2701 * @remarks No-long-jump zone!!!
2702 */
2703static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2704{
2705 int rc = VERR_INTERNAL_ERROR_5;
2706#ifndef IN_NEM_DARWIN
2707 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2708#endif
2709 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2710 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2711#ifndef IN_NEM_DARWIN
2712 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2713#endif
2714
2715 /*
2716 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2717 */
2718 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2719 {
2720 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2721 {
2722 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2723#ifndef IN_NEM_DARWIN
2724 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2725 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2726#endif
2727 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2728 AssertRC(rc);
2729 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2730 }
2731
2732 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2733 {
2734 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2735#ifndef IN_NEM_DARWIN
2736 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2737 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2738#endif
2739 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2740 AssertRC(rc);
2741 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2742 }
2743
2744 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2745 {
2746 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2747#ifndef IN_NEM_DARWIN
2748 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2749 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2750#endif
2751 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2752 AssertRC(rc);
2753 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2754 }
2755
2756 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2757 {
2758 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2759#ifndef IN_NEM_DARWIN
2760 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2761 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2762#endif
2763 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2764 AssertRC(rc);
2765 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2766 }
2767
2768 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2769 {
2770 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2771#ifndef IN_NEM_DARWIN
2772 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2773 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2774#endif
2775 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2776 AssertRC(rc);
2777 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2778 }
2779
2780 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2781 {
2782 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2783#ifndef IN_NEM_DARWIN
2784 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2785 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2786#endif
2787 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2788 AssertRC(rc);
2789 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2790 }
2791
2792#ifdef VBOX_STRICT
2793 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2794#endif
2795 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2796 pCtx->cs.Attr.u));
2797 }
2798
2799 /*
2800 * Guest TR.
2801 */
2802 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2803 {
2804 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2805
2806 /*
2807 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2808 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2809 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2810 */
2811 uint16_t u16Sel;
2812 uint32_t u32Limit;
2813 uint64_t u64Base;
2814 uint32_t u32AccessRights;
2815#ifndef IN_NEM_DARWIN
2816 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2817#endif
2818 {
2819 u16Sel = pCtx->tr.Sel;
2820 u32Limit = pCtx->tr.u32Limit;
2821 u64Base = pCtx->tr.u64Base;
2822 u32AccessRights = pCtx->tr.Attr.u;
2823 }
2824#ifndef IN_NEM_DARWIN
2825 else
2826 {
2827 Assert(!pVmxTransient->fIsNestedGuest);
2828 Assert(pVM->hm.s.vmx.pRealModeTSS);
2829 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2830
2831 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2832 RTGCPHYS GCPhys;
2833 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2834 AssertRCReturn(rc, rc);
2835
2836 X86DESCATTR DescAttr;
2837 DescAttr.u = 0;
2838 DescAttr.n.u1Present = 1;
2839 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2840
2841 u16Sel = 0;
2842 u32Limit = HM_VTX_TSS_SIZE;
2843 u64Base = GCPhys;
2844 u32AccessRights = DescAttr.u;
2845 }
2846#endif
2847
2848 /* Validate. */
2849 Assert(!(u16Sel & RT_BIT(2)));
2850 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2851 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2852 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2853 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2854 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2855 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2856 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2857 Assert( (u32Limit & 0xfff) == 0xfff
2858 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2859 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2860 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2861
2862 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2865 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2866
2867 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2868 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2869 }
2870
2871 /*
2872 * Guest GDTR.
2873 */
2874 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2875 {
2876 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2877
2878 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2879 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2880
2881 /* Validate. */
2882 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2883
2884 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2885 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2886 }
2887
2888 /*
2889 * Guest LDTR.
2890 */
2891 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2892 {
2893 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2894
2895 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2896 uint32_t u32Access;
2897 if ( !pVmxTransient->fIsNestedGuest
2898 && !pCtx->ldtr.Attr.u)
2899 u32Access = X86DESCATTR_UNUSABLE;
2900 else
2901 u32Access = pCtx->ldtr.Attr.u;
2902
2903 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2906 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2907
2908 /* Validate. */
2909 if (!(u32Access & X86DESCATTR_UNUSABLE))
2910 {
2911 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2912 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2913 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2914 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2915 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2916 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2917 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2918 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2919 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2920 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2921 }
2922
2923 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2924 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2925 }
2926
2927 /*
2928 * Guest IDTR.
2929 */
2930 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2931 {
2932 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2933
2934 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2935 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2936
2937 /* Validate. */
2938 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2939
2940 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2941 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2942 }
2943
2944 return VINF_SUCCESS;
2945}
2946
2947
2948/**
2949 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2950 * VM-exit interruption info type.
2951 *
2952 * @returns The IEM exception flags.
2953 * @param uVector The event vector.
2954 * @param uVmxEventType The VMX event type.
2955 *
2956 * @remarks This function currently only constructs flags required for
2957 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2958 * and CR2 aspects of an exception are not included).
2959 */
2960static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2961{
2962 uint32_t fIemXcptFlags;
2963 switch (uVmxEventType)
2964 {
2965 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2966 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2967 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2968 break;
2969
2970 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2971 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2972 break;
2973
2974 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2975 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2976 break;
2977
2978 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2979 {
2980 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2981 if (uVector == X86_XCPT_BP)
2982 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2983 else if (uVector == X86_XCPT_OF)
2984 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2985 else
2986 {
2987 fIemXcptFlags = 0;
2988 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2989 }
2990 break;
2991 }
2992
2993 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2994 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2995 break;
2996
2997 default:
2998 fIemXcptFlags = 0;
2999 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3000 break;
3001 }
3002 return fIemXcptFlags;
3003}
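/* Usage sketch (hypothetical local variable names, not from the original code): the flags
   built here for the IDT-vectoring event and for the current exit exception are typically
   combined via IEMEvaluateRecursiveXcpt() to decide whether to deliver the new event as-is,
   raise a double fault (#DF) or signal a triple fault, e.g.:
       uint32_t const fIdtVectorFlags  = vmxHCGetIemXcptFlags(uIdtVector,  uIdtVectorType);
       uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitIntInfoType);
 */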
3004
3005
3006/**
3007 * Sets an event as a pending event to be injected into the guest.
3008 *
3009 * @param pVCpu The cross context virtual CPU structure.
3010 * @param u32IntInfo The VM-entry interruption-information field.
3011 * @param cbInstr The VM-entry instruction length in bytes (for
3012 * software interrupts, exceptions and privileged
3013 * software exceptions).
3014 * @param u32ErrCode The VM-entry exception error code.
3015 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3016 * page-fault.
3017 */
3018DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3019 RTGCUINTPTR GCPtrFaultAddress)
3020{
3021 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3022 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3024 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3025 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3026 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3027}
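/* Note: u32IntInfo uses the VM-entry interruption-information field layout (vector, type,
   error-code-valid and valid bits); the vmxHCSetPendingXcptXxx / vmxHCSetPendingExtInt
   helpers below show how it is assembled with RT_BF_MAKE. */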
3028
3029
3030/**
3031 * Sets an external interrupt as pending-for-injection into the VM.
3032 *
3033 * @param pVCpu The cross context virtual CPU structure.
3034 * @param u8Interrupt The external interrupt vector.
3035 */
3036DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3037{
3038 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3041 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3042 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3043 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
3044}
3045
3046
3047/**
3048 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3049 *
3050 * @param pVCpu The cross context virtual CPU structure.
3051 */
3052DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3053{
3054 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3056 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3057 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3058 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3059 Log4Func(("NMI pending injection\n"));
3060}
3061
3062
3063/**
3064 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3065 *
3066 * @param pVCpu The cross context virtual CPU structure.
3067 */
3068DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3069{
3070 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3072 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3073 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3074 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3075}
3076
3077
3078/**
3079 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3080 *
3081 * @param pVCpu The cross context virtual CPU structure.
3082 */
3083DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3084{
3085 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3087 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3088 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3089 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3090}
3091
3092
3093/**
3094 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3095 *
3096 * @param pVCpu The cross context virtual CPU structure.
3097 */
3098DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3099{
3100 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3102 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3103 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3104 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3105}
3106
3107
3108#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3109/**
3110 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3111 *
3112 * @param pVCpu The cross context virtual CPU structure.
3113 * @param u32ErrCode The error code for the general-protection exception.
3114 */
3115DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3116{
3117 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3118 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3119 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3120 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3121 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3122}
3123
3124
3125/**
3126 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3127 *
3128 * @param pVCpu The cross context virtual CPU structure.
3129 * @param u32ErrCode The error code for the stack exception.
3130 */
3131DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3132{
3133 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3134 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3135 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3136 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3137 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3138}
3139#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3140
3141
3142/**
3143 * Fixes up attributes for the specified segment register.
3144 *
3145 * @param pVCpu The cross context virtual CPU structure.
3146 * @param pSelReg The segment register that needs fixing.
3147 * @param pszRegName The register name (for logging and assertions).
3148 */
3149static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3150{
3151 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3152
3153 /*
3154 * If VT-x marks the segment as unusable, most other bits remain undefined:
3155 * - For CS the L, D and G bits have meaning.
3156 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3157 * - For the remaining data segments no bits are defined.
3158 *
3159     * The present bit and the unusable bit have been observed to be set at the
3160 * same time (the selector was supposed to be invalid as we started executing
3161 * a V8086 interrupt in ring-0).
3162 *
3163     * What is important for the rest of the VBox code is that the P bit is
3164     * cleared. Some of the other VBox code recognizes the unusable bit, but
3165     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3166 * safe side here, we'll strip off P and other bits we don't care about. If
3167 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3168 *
3169 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3170 */
3171#ifdef VBOX_STRICT
3172 uint32_t const uAttr = pSelReg->Attr.u;
3173#endif
3174
3175 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3176 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3177 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3178
3179#ifdef VBOX_STRICT
3180# ifndef IN_NEM_DARWIN
3181 VMMRZCallRing3Disable(pVCpu);
3182# endif
3183 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3184# ifdef DEBUG_bird
3185 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3186 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3187 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3188# endif
3189# ifndef IN_NEM_DARWIN
3190 VMMRZCallRing3Enable(pVCpu);
3191# endif
3192 NOREF(uAttr);
3193#endif
3194 RT_NOREF2(pVCpu, pszRegName);
3195}
3196
3197
3198/**
3199 * Imports a guest segment register from the current VMCS into the guest-CPU
3200 * context.
3201 *
3202 * @param pVCpu The cross context virtual CPU structure.
3203 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3204 *
3205 * @remarks Called with interrupts and/or preemption disabled.
3206 */
3207template<uint32_t const a_iSegReg>
3208DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3209{
3210 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3211    /* Check that the macros we depend upon here and in the exporter parent function work: */
3212#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3213 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3216 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3217 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3218 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3219 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3220 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3221 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3222 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3223
3224 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3225
3226 uint16_t u16Sel;
3227 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3228 pSelReg->Sel = u16Sel;
3229 pSelReg->ValidSel = u16Sel;
3230
3231 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3232 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3233
3234 uint32_t u32Attr;
3235 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3236 pSelReg->Attr.u = u32Attr;
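    /* Note: the "ES\0CS\0SS\0DS\0FS\0GS" literal below is indexed with a_iSegReg * 3
       because each register name is two characters plus a NUL terminator, so the right
       name is picked for logging without a separate lookup table. */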
3237 if (u32Attr & X86DESCATTR_UNUSABLE)
3238 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3239
3240 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3241}
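/* This template is instantiated once per segment register by the state importers further
   down, e.g. vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu), so the VMCS field encodings
   above are compile-time constants. */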
3242
3243
3244/**
3245 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3246 *
3247 * @param pVCpu The cross context virtual CPU structure.
3248 *
3249 * @remarks Called with interrupts and/or preemption disabled.
3250 */
3251DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3252{
3253 uint16_t u16Sel;
3254 uint64_t u64Base;
3255 uint32_t u32Limit, u32Attr;
3256 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3257 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3258 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3259 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3260
3261 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3262 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3263 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3264 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3265 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3266 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3267 if (u32Attr & X86DESCATTR_UNUSABLE)
3268 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3269}
3270
3271
3272/**
3273 * Imports the guest TR from the VMCS into the guest-CPU context.
3274 *
3275 * @param pVCpu The cross context virtual CPU structure.
3276 *
3277 * @remarks Called with interrupts and/or preemption disabled.
3278 */
3279DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3280{
3281 uint16_t u16Sel;
3282 uint64_t u64Base;
3283 uint32_t u32Limit, u32Attr;
3284 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3285 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3286 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3287 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3288
3289 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3290 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3291 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3292 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3293 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3294 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3295 /* TR is the only selector that can never be unusable. */
3296 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3297}
3298
3299
3300/**
3301 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3302 *
3303 * @returns The RIP value.
3304 * @param pVCpu The cross context virtual CPU structure.
3305 *
3306 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3307 * @remarks Do -not- call this function directly!
3308 */
3309DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3310{
3311 uint64_t u64Val;
3312 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3313 AssertRC(rc);
3314
3315 pVCpu->cpum.GstCtx.rip = u64Val;
3316
3317 return u64Val;
3318}
3319
3320
3321/**
3322 * Imports the guest RIP from the VMCS into the guest-CPU context.
3323 *
3324 * @param pVCpu The cross context virtual CPU structure.
3325 *
3326 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3327 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3328 * instead!!!
3329 */
3330DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3331{
3332 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3333 {
3334 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3335 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3336 }
3337}
3338
3339
3340/**
3341 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3342 *
3343 * @param pVCpu The cross context virtual CPU structure.
3344 * @param pVmcsInfo The VMCS info. object.
3345 *
3346 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3347 * @remarks Do -not- call this function directly!
3348 */
3349DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3350{
3351 uint64_t fRFlags;
3352 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3353 AssertRC(rc);
3354
3355 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3356 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3357
3358 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3359#ifndef IN_NEM_DARWIN
3360 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3361 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3362    { /* most likely */ }
3363 else
3364 {
3365 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3366 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3367 }
3368#else
3369 RT_NOREF(pVmcsInfo);
3370#endif
3371}
3372
3373
3374/**
3375 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3376 *
3377 * @param pVCpu The cross context virtual CPU structure.
3378 * @param pVmcsInfo The VMCS info. object.
3379 *
3380 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3381 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3382 * instead!!!
3383 */
3384DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3385{
3386 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3387 {
3388 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3389 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3390 }
3391}
3392
3393
3394#ifndef IN_NEM_DARWIN
3395/**
3396 * Imports the guest TSC_AUX and certain other MSRs from the VMCS into the guest-CPU
3397 * context.
3398 *
3399 * The other MSRs are in the VM-exit MSR-store.
3400 *
3401 * @returns VBox status code.
3402 * @param pVCpu The cross context virtual CPU structure.
3403 * @param pVmcsInfo The VMCS info. object.
3404 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3405 * unexpected errors). Ignored in NEM/darwin context.
3406 */
3407DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3408{
3409 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3410 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3411 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3412 Assert(pMsrs);
3413 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3414 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3415 for (uint32_t i = 0; i < cMsrs; i++)
3416 {
3417 uint32_t const idMsr = pMsrs[i].u32Msr;
3418 switch (idMsr)
3419 {
3420 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3421 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3422 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3423 default:
3424 {
3425 uint32_t idxLbrMsr;
3426 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3427 if (VM_IS_VMX_LBR(pVM))
3428 {
3429 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3430 {
3431 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3432 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3433 break;
3434 }
3435 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3436 {
3437                        Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3438 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3439 break;
3440 }
3441 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3442 {
3443 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3444 break;
3445 }
3446 /* Fallthru (no break) */
3447 }
3448 pVCpu->cpum.GstCtx.fExtrn = 0;
3449                VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3450 ASMSetFlags(fEFlags);
3451 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3452 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3453 }
3454 }
3455 }
3456 return VINF_SUCCESS;
3457}
3458#endif /* !IN_NEM_DARWIN */
3459
3460
3461/**
3462 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3463 *
3464 * @param pVCpu The cross context virtual CPU structure.
3465 * @param pVmcsInfo The VMCS info. object.
3466 */
3467DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3468{
3469 uint64_t u64Cr0;
3470 uint64_t u64Shadow;
3471 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3472 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
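    /* Bits set in the CR0 guest/host mask are host-owned: the value the guest believes it
       wrote is kept in the CR0 read shadow, while guest-owned bits live in the VMCS guest
       CR0 field. The merges below reconstruct the effective guest view accordingly. */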
3473#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3474 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3475 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3476#else
3477 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3478 {
3479 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3480 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3481 }
3482 else
3483 {
3484 /*
3485 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3486 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3487 * re-construct CR0. See @bugref{9180#c95} for details.
3488 */
3489 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3490 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3491 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3492 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3493 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3494 Assert(u64Cr0 & X86_CR0_NE);
3495 }
3496#endif
3497
3498#ifndef IN_NEM_DARWIN
3499 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3500#endif
3501 CPUMSetGuestCR0(pVCpu, u64Cr0);
3502#ifndef IN_NEM_DARWIN
3503 VMMRZCallRing3Enable(pVCpu);
3504#endif
3505}
3506
3507
3508/**
3509 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3510 *
3511 * @param pVCpu The cross context virtual CPU structure.
3512 */
3513DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3514{
3515 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3516 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3517
3518 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3519 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3520 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3521 && CPUMIsGuestPagingEnabledEx(pCtx)))
3522 {
3523 uint64_t u64Cr3;
3524 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3525 if (pCtx->cr3 != u64Cr3)
3526 {
3527 pCtx->cr3 = u64Cr3;
3528 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3529 }
3530
3531 /*
3532 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3533 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3534 */
3535 if (CPUMIsGuestInPAEModeEx(pCtx))
3536 {
3537 X86PDPE aPaePdpes[4];
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3539 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3540 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3541 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3542 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3543 {
3544 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3545 /* PGM now updates PAE PDPTEs while updating CR3. */
3546 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3547 }
3548 }
3549 }
3550}
3551
3552
3553/**
3554 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3555 *
3556 * @param pVCpu The cross context virtual CPU structure.
3557 * @param pVmcsInfo The VMCS info. object.
3558 */
3559DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3560{
3561 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3562 uint64_t u64Cr4;
3563 uint64_t u64Shadow;
3564 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3565 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3566#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3567 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3568 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3569#else
3570 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3571 {
3572 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3573 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3574 }
3575 else
3576 {
3577 /*
3578 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3579 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3580 * re-construct CR4. See @bugref{9180#c95} for details.
3581 */
3582 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3583 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3584 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3585 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3586 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3587 Assert(u64Cr4 & X86_CR4_VMXE);
3588 }
3589#endif
3590 pCtx->cr4 = u64Cr4;
3591}
3592
3593
3594/**
3595 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3596 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3597 */
3598DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3599{
3600 /*
3601 * We must import RIP here to set our EM interrupt-inhibited state.
3602 * We also import RFLAGS as our code that evaluates pending interrupts
3603 * before VM-entry requires it.
3604 */
3605 vmxHCImportGuestRip(pVCpu);
3606 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3607
3608 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3609 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3610 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3611 pVCpu->cpum.GstCtx.rip);
3612 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3613}
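/* Background: STI blocking inhibits interrupts for the instruction following STI, while
   MOV-SS/POP-SS blocking additionally suppresses certain debug exceptions; both are
   tracked by CPUM as an interrupt shadow keyed to the RIP imported above, and NMI
   blocking is tracked separately. */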
3614
3615
3616/**
3617 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3618 * context.
3619 *
3620 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3621 *
3622 * @param pVCpu The cross context virtual CPU structure.
3623 * @param pVmcsInfo The VMCS info. object.
3624 *
3625 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3626 * do not log!
3627 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3628 * instead!!!
3629 */
3630DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3631{
3632 uint32_t u32Val;
3633 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3634 Assert((u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3635 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
3636 if (!u32Val)
3637 {
3638 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3639 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3640 }
3641 else
3642 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3643}
3644
3645
3646/**
3647 * Worker for VMXR0ImportStateOnDemand.
3648 *
3649 * @returns VBox status code.
3650 * @param pVCpu The cross context virtual CPU structure.
3651 * @param pVmcsInfo The VMCS info. object.
3652 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3653 */
3654static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3655{
3656 int rc = VINF_SUCCESS;
3657 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3658 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3659 uint32_t u32Val;
3660
3661 /*
3662     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3663 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3664 * neither are other host platforms.
3665 *
3666 * Committing this temporarily as it prevents BSOD.
3667 *
3668 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3669 */
3670#ifdef RT_OS_WINDOWS
3671 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3672 return VERR_HM_IPE_1;
3673#endif
3674
3675 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3676
3677#ifndef IN_NEM_DARWIN
3678 /*
3679 * We disable interrupts to make the updating of the state and in particular
3680 * the fExtrn modification atomic wrt to preemption hooks.
3681 */
3682 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3683#endif
3684
3685 fWhat &= pCtx->fExtrn;
3686 if (fWhat)
3687 {
3688 do
3689 {
3690 if (fWhat & CPUMCTX_EXTRN_RIP)
3691 vmxHCImportGuestRip(pVCpu);
3692
3693 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3694 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3695
3696 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3697 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3698 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3699
3700 if (fWhat & CPUMCTX_EXTRN_RSP)
3701 {
3702 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3703 AssertRC(rc);
3704 }
3705
3706 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3707 {
3708 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3709#ifndef IN_NEM_DARWIN
3710 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3711#else
3712 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3713#endif
3714 if (fWhat & CPUMCTX_EXTRN_CS)
3715 {
3716 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3717 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3718 if (fRealOnV86Active)
3719 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3720 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3721 }
3722 if (fWhat & CPUMCTX_EXTRN_SS)
3723 {
3724 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3725 if (fRealOnV86Active)
3726 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3727 }
3728 if (fWhat & CPUMCTX_EXTRN_DS)
3729 {
3730 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3731 if (fRealOnV86Active)
3732 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3733 }
3734 if (fWhat & CPUMCTX_EXTRN_ES)
3735 {
3736 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3737 if (fRealOnV86Active)
3738 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3739 }
3740 if (fWhat & CPUMCTX_EXTRN_FS)
3741 {
3742 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3743 if (fRealOnV86Active)
3744 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3745 }
3746 if (fWhat & CPUMCTX_EXTRN_GS)
3747 {
3748 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3749 if (fRealOnV86Active)
3750 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3751 }
3752 }
3753
3754 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3755 {
3756 if (fWhat & CPUMCTX_EXTRN_LDTR)
3757 vmxHCImportGuestLdtr(pVCpu);
3758
3759 if (fWhat & CPUMCTX_EXTRN_GDTR)
3760 {
3761 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3762 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3763 pCtx->gdtr.cbGdt = u32Val;
3764 }
3765
3766 /* Guest IDTR. */
3767 if (fWhat & CPUMCTX_EXTRN_IDTR)
3768 {
3769 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3770 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3771 pCtx->idtr.cbIdt = u32Val;
3772 }
3773
3774 /* Guest TR. */
3775 if (fWhat & CPUMCTX_EXTRN_TR)
3776 {
3777#ifndef IN_NEM_DARWIN
3778                /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3779                   so we don't need to import that one. */
3780 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3781#endif
3782 vmxHCImportGuestTr(pVCpu);
3783 }
3784 }
3785
3786 if (fWhat & CPUMCTX_EXTRN_DR7)
3787 {
3788#ifndef IN_NEM_DARWIN
3789 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3790#endif
3791 {
3792 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3793 AssertRC(rc);
3794 }
3795 }
3796
3797 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3798 {
3799 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3800 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3801 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3802 pCtx->SysEnter.cs = u32Val;
3803 }
3804
3805#ifndef IN_NEM_DARWIN
3806 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3807 {
3808 if ( pVM->hmr0.s.fAllow64BitGuests
3809 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3810 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3811 }
3812
3813 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3814 {
3815 if ( pVM->hmr0.s.fAllow64BitGuests
3816 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3817 {
3818 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3819 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3820 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3821 }
3822 }
3823
3824 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3825 {
3826 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3827 AssertRCReturn(rc, rc);
3828 }
3829#else
3830 NOREF(pVM);
3831#endif
3832
3833 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3834 {
3835 if (fWhat & CPUMCTX_EXTRN_CR0)
3836 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3837
3838 if (fWhat & CPUMCTX_EXTRN_CR4)
3839 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3840
3841 if (fWhat & CPUMCTX_EXTRN_CR3)
3842 vmxHCImportGuestCr3(pVCpu);
3843 }
3844
3845#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3846 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3847 {
3848 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3849 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3850 {
3851 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3852 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3853 if (RT_SUCCESS(rc))
3854 { /* likely */ }
3855 else
3856 break;
3857 }
3858 }
3859#endif
3860 } while (0);
3861
3862 if (RT_SUCCESS(rc))
3863 {
3864 /* Update fExtrn. */
3865 pCtx->fExtrn &= ~fWhat;
3866
3867 /* If everything has been imported, clear the HM keeper bit. */
3868 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3869 {
3870#ifndef IN_NEM_DARWIN
3871 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3872#else
3873 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3874#endif
3875 Assert(!pCtx->fExtrn);
3876 }
3877 }
3878 }
3879#ifndef IN_NEM_DARWIN
3880 else
3881 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3882
3883 /*
3884 * Restore interrupts.
3885 */
3886 ASMSetFlags(fEFlags);
3887#endif
3888
3889 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3890
3891 if (RT_SUCCESS(rc))
3892 { /* likely */ }
3893 else
3894 return rc;
3895
3896 /*
3897 * Honor any pending CR3 updates.
3898 *
3899 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3900 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3901 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3902 *
3903 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3904 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3905 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3906 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3907 *
3908 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3909 *
3910 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3911 */
3912 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3913#ifndef IN_NEM_DARWIN
3914 && VMMRZCallRing3IsEnabled(pVCpu)
3915#endif
3916 )
3917 {
3918 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3919 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3920 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3921 }
3922
3923 return VINF_SUCCESS;
3924}
3925
3926
3927/**
3928 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3929 *
3930 * @returns VBox status code.
3931 * @param pVCpu The cross context virtual CPU structure.
3932 * @param pVmcsInfo The VMCS info. object.
3933 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3934 * in NEM/darwin context.
3935 * @tparam a_fWhat What to import, zero or more bits from
3936 * HMVMX_CPUMCTX_EXTRN_ALL.
3937 */
3938template<uint64_t const a_fWhat>
3939static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3940{
3941 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3942 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3943 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3944 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3945
3946    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3947
3948 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3949
3950    /* RIP and RFLAGS may have been imported already by the post exit code
3951       together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, so the fExtrn
3952       checks below let us skip re-importing them here. */
3953 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3954 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3955 {
3956 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3957 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3958
3959 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3960 {
3961 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3962 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3963 else
3964 vmxHCImportGuestCoreRip(pVCpu);
3965 }
3966 }
3967
3968 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3969 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3970 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3971
3972 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3973 {
3974 if (a_fWhat & CPUMCTX_EXTRN_CS)
3975 {
3976 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3977 /** @todo try get rid of this carp, it smells and is probably never ever
3978 * used: */
3979 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3980 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3981 {
3982 vmxHCImportGuestCoreRip(pVCpu);
3983 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3984 }
3985 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3986 }
3987 if (a_fWhat & CPUMCTX_EXTRN_SS)
3988 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3989 if (a_fWhat & CPUMCTX_EXTRN_DS)
3990 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3991 if (a_fWhat & CPUMCTX_EXTRN_ES)
3992 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3993 if (a_fWhat & CPUMCTX_EXTRN_FS)
3994 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3995 if (a_fWhat & CPUMCTX_EXTRN_GS)
3996 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3997
3998 /* Guest TR.
3999           Real-mode emulation using virtual-8086 mode has the fake TSS
4000           (pRealModeTSS) in TR, so we don't need to import that one. */
4001#ifndef IN_NEM_DARWIN
4002 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
4003 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
4004 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
4005#else
4006 if (a_fWhat & CPUMCTX_EXTRN_TR)
4007#endif
4008 vmxHCImportGuestTr(pVCpu);
4009
4010#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4011 if (fRealOnV86Active)
4012 {
4013 if (a_fWhat & CPUMCTX_EXTRN_CS)
4014 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4015 if (a_fWhat & CPUMCTX_EXTRN_SS)
4016 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4017 if (a_fWhat & CPUMCTX_EXTRN_DS)
4018 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4019 if (a_fWhat & CPUMCTX_EXTRN_ES)
4020 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4021 if (a_fWhat & CPUMCTX_EXTRN_FS)
4022 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4023 if (a_fWhat & CPUMCTX_EXTRN_GS)
4024 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4025 }
4026#endif
4027 }
4028
4029 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4030 {
4031 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4032 AssertRC(rc);
4033 }
4034
4035 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4036 vmxHCImportGuestLdtr(pVCpu);
4037
4038 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4039 {
4040 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4041 uint32_t u32Val;
4042 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4043 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4044 }
4045
4046 /* Guest IDTR. */
4047 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4048 {
4049 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4050 uint32_t u32Val;
4051 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4052        pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
4053 }
4054
4055 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4056 {
4057#ifndef IN_NEM_DARWIN
4058 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4059#endif
4060 {
4061 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4062 AssertRC(rc);
4063 }
4064 }
4065
4066 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4067 {
4068 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4069 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4070 uint32_t u32Val;
4071 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4072 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4073 }
4074
4075#ifndef IN_NEM_DARWIN
4076 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4077 {
4078 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4079 && pVM->hmr0.s.fAllow64BitGuests)
4080 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4081 }
4082
4083 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4084 {
4085 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4086 && pVM->hmr0.s.fAllow64BitGuests)
4087 {
4088 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4089 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4090 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4091 }
4092 }
4093
4094 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4095 {
4096 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4097 AssertRCReturn(rc1, rc1);
4098 }
4099#else
4100 NOREF(pVM);
4101#endif
4102
4103 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4104 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4105
4106 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4107 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4108
4109 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4110 vmxHCImportGuestCr3(pVCpu);
4111
4112#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4113 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4114 {
4115 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4116 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4117 {
4118 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4119 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4120 AssertRCReturn(rc, rc);
4121 }
4122 }
4123#endif
4124
4125 /* Update fExtrn. */
4126 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4127
4128 /* If everything has been imported, clear the HM keeper bit. */
4129 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4130 {
4131#ifndef IN_NEM_DARWIN
4132 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4133#else
4134 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4135#endif
4136 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4137 }
4138
4139 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4140
4141 /*
4142 * Honor any pending CR3 updates.
4143 *
4144 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4145 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4146 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4147 *
4148 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4149 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4150 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4151 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4152 *
4153 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4154 *
4155 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4156 */
4157#ifndef IN_NEM_DARWIN
4158 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4159 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4160 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4161 return VINF_SUCCESS;
4162 ASMSetFlags(fEFlags);
4163#else
4164 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4165 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4166 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4167 return VINF_SUCCESS;
4168 RT_NOREF_PV(fEFlags);
4169#endif
4170
4171 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4172 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4173 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4174 return VINF_SUCCESS;
4175}
4176
4177
4178/**
4179 * Internal state fetcher.
4180 *
4181 * @returns VBox status code.
4182 * @param pVCpu The cross context virtual CPU structure.
4183 * @param pVmcsInfo The VMCS info. object.
4184 * @param pszCaller For logging.
4185 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4186 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4187 * already. This is ORed together with @a a_fWhat when
4188 * calculating what needs fetching (just for safety).
4189 * @tparam  a_fDonePostExit     What's ASSUMED to have been retrieved by
4190 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4191 * already. This is ORed together with @a a_fWhat when
4192 * calculating what needs fetching (just for safety).
4193 */
4194template<uint64_t const a_fWhat,
4195 uint64_t const a_fDoneLocal = 0,
4196 uint64_t const a_fDonePostExit = 0
4197#ifndef IN_NEM_DARWIN
4198 | CPUMCTX_EXTRN_INHIBIT_INT
4199 | CPUMCTX_EXTRN_INHIBIT_NMI
4200# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4201 | HMVMX_CPUMCTX_EXTRN_ALL
4202# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4203 | CPUMCTX_EXTRN_RFLAGS
4204# endif
4205#else /* IN_NEM_DARWIN */
4206 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4207#endif /* IN_NEM_DARWIN */
4208>
4209DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4210{
4211 RT_NOREF_PV(pszCaller);
4212 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4213 {
4214#ifndef IN_NEM_DARWIN
4215 /*
4216 * We disable interrupts to make the updating of the state and in particular
4217 * the fExtrn modification atomic wrt to preemption hooks.
4218 */
4219 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4220#else
4221 RTCCUINTREG const fEFlags = 0;
4222#endif
4223
4224 /*
4225 * We combine all three parameters and take the (probably) inlined optimized
4226 * code path for the new things specified in a_fWhat.
4227 *
4228 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4229 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4230 * also take the streamlined path when both of these are cleared in fExtrn
4231 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4232 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4233 */
4234 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4235 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4236 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4237 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4238 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4239 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4240 {
4241 int const rc = vmxHCImportGuestStateInner< a_fWhat
4242 & HMVMX_CPUMCTX_EXTRN_ALL
4243 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4244#ifndef IN_NEM_DARWIN
4245 ASMSetFlags(fEFlags);
4246#endif
4247 return rc;
4248 }
4249
4250#ifndef IN_NEM_DARWIN
4251 ASMSetFlags(fEFlags);
4252#endif
4253
4254 /*
4255 * We shouldn't normally get here, but it may happen when executing
4256 * in the debug run-loops. Typically, everything should already have
4257 * been fetched then. Otherwise call the fallback state import function.
4258 */
4259 if (fWhatToDo == 0)
4260 { /* hope the cause was the debug loop or something similar */ }
4261 else
4262 {
4263 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4264 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4265 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4266 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4267 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4268 }
4269 }
4270 return VINF_SUCCESS;
4271}
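/* Usage sketch (mirrors how the VM-exit handlers later in this file call this template;
   the exact extern mask is the caller's choice):
       int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS>(pVCpu, pVmcsInfo, __FUNCTION__);
       AssertRCReturn(rc, rc);
 */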
4272
4273
4274/**
4275 * Check per-VM and per-VCPU force flag actions that require us to go back to
4276 * ring-3 for one reason or another.
4277 *
4278 * @returns Strict VBox status code (i.e. informational status codes too)
4279 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4280 * ring-3.
4281 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4282 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4283 * interrupts)
4284 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4285 * all EMTs to be in ring-3.
4286 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
4287 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4288 * to the EM loop.
4289 *
4290 * @param pVCpu The cross context virtual CPU structure.
4291 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4292 * @param fStepping Whether we are single-stepping the guest using the
4293 * hypervisor debugger.
4294 *
4295 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4296 * is no longer in VMX non-root mode.
4297 */
4298static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4299{
4300#ifndef IN_NEM_DARWIN
4301 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4302#endif
4303
4304 /*
4305 * Update pending interrupts into the APIC's IRR.
4306 */
4307 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4308 APICUpdatePendingInterrupts(pVCpu);
4309
4310 /*
4311 * Anything pending? Should be more likely than not if we're doing a good job.
4312 */
4313 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4314 if ( !fStepping
4315 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4316 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4317 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4318 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4319 return VINF_SUCCESS;
4320
4321    /* Pending PGM CR3 sync. */
4322 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4323 {
4324 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4325 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4326 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4327 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4328 if (rcStrict != VINF_SUCCESS)
4329 {
4330 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4331 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4332 return rcStrict;
4333 }
4334 }
4335
4336 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4337 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4338 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4339 {
4340 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4341 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4342 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4343 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4344 return rc;
4345 }
4346
4347 /* Pending VM request packets, such as hardware interrupts. */
4348 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4349 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4350 {
4351 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4352 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4353 return VINF_EM_PENDING_REQUEST;
4354 }
4355
4356 /* Pending PGM pool flushes. */
4357 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4358 {
4359 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4360 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4361 return VINF_PGM_POOL_FLUSH_PENDING;
4362 }
4363
4364 /* Pending DMA requests. */
4365 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4366 {
4367 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4368 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4369 return VINF_EM_RAW_TO_R3;
4370 }
4371
4372#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4373 /*
4374 * Pending nested-guest events.
4375 *
4376     * Please note that the priority of these events is specified and important.
4377 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4378 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4379 *
4380 * Interrupt-window and NMI-window VM-exits for the nested-guest need not be
4381 * handled here. They'll be handled by the hardware while executing the nested-guest
4382     * or by us when injecting events that are not part of the VM-entry of the nested-guest.
4383 */
4384 if (fIsNestedGuest)
4385 {
4386 /* Pending nested-guest APIC-write (may or may not cause a VM-exit). */
4387 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4388 {
4389 Log4Func(("Pending nested-guest APIC-write\n"));
4390 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4391 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4392 if ( rcStrict == VINF_SUCCESS
4393 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4394 return rcStrict;
4395 }
4396
4397 /* Pending nested-guest monitor-trap flag (MTF). */
4398 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4399 {
4400 Log4Func(("Pending nested-guest MTF\n"));
4401 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4402 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4403 return rcStrict;
4404 }
4405
4406 /* Pending nested-guest VMX-preemption timer expired. */
4407 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4408 {
4409 Log4Func(("Pending nested-guest preempt timer\n"));
4410 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4411 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4412 return rcStrict;
4413 }
4414 }
4415#else
4416 NOREF(fIsNestedGuest);
4417#endif
4418
4419 return VINF_SUCCESS;
4420}
4421
4422
4423/**
4424 * Converts any TRPM trap into a pending HM event. This is typically used when
4425 * entering from ring-3 (not longjmp returns).
4426 *
4427 * @param pVCpu The cross context virtual CPU structure.
4428 */
4429static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4430{
4431 Assert(TRPMHasTrap(pVCpu));
4432 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4433
4434 uint8_t uVector;
4435 TRPMEVENT enmTrpmEvent;
4436 uint32_t uErrCode;
4437 RTGCUINTPTR GCPtrFaultAddress;
4438 uint8_t cbInstr;
4439 bool fIcebp;
4440
4441 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4442 AssertRC(rc);
4443
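    /* Assemble the event information (same bit layout as the VM-entry interruption-information field):
       vector in bits 7:0, event type in bits 10:8 and the valid bit (31). E.g. a hardware exception for
       vector 0xE encodes as 0x8000030E; bit 11 is set in addition when an error code is delivered. */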
4444 uint32_t u32IntInfo;
4445 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4446 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4447
4448 rc = TRPMResetTrap(pVCpu);
4449 AssertRC(rc);
4450 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4451 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4452
4453 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4454}
4455
4456
4457/**
4458 * Converts the pending HM event into a TRPM trap.
4459 *
4460 * @param pVCpu The cross context virtual CPU structure.
4461 */
4462static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4463{
4464 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4465
4466 /* If a trap was already pending, we did something wrong! */
4467 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4468
4469 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4470 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4471 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4472
4473 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4474
4475 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4476 AssertRC(rc);
4477
4478 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4479 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4480
4481 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4482 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4483 else
4484 {
4485 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4486 switch (uVectorType)
4487 {
4488 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4489 TRPMSetTrapDueToIcebp(pVCpu);
4490 RT_FALL_THRU();
4491 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4492 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4493 {
4494 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4495 || ( uVector == X86_XCPT_BP /* INT3 */
4496 || uVector == X86_XCPT_OF /* INTO */
4497 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4498 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4499 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4500 break;
4501 }
4502 }
4503 }
4504
4505 /* We're now done converting the pending event. */
4506 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4507}
4508
4509
4510/**
4511 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4512 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4513 *
4514 * @param pVCpu The cross context virtual CPU structure.
4515 * @param pVmcsInfo The VMCS info. object.
4516 */
4517static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4518{
4519 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4520 {
4521 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4522 {
4523 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4524 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4525 AssertRC(rc);
4526 }
4527 Log4Func(("Enabled interrupt-window exiting\n"));
4528    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4529}
4530
4531
4532/**
4533 * Clears the interrupt-window exiting control in the VMCS.
4534 *
4535 * @param pVCpu The cross context virtual CPU structure.
4536 * @param pVmcsInfo The VMCS info. object.
4537 */
4538DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4539{
4540 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4541 {
4542 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4543 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4544 AssertRC(rc);
4545 Log4Func(("Disabled interrupt-window exiting\n"));
4546 }
4547}
4548
4549
4550/**
4551 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4552 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4553 *
4554 * @param pVCpu The cross context virtual CPU structure.
4555 * @param pVmcsInfo The VMCS info. object.
4556 */
4557static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4558{
4559 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4560 {
4561 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4562 {
4563 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4564 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4565 AssertRC(rc);
4566 Log4Func(("Enabled NMI-window exiting\n"));
4567 }
4568 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4569}
4570
4571
4572/**
4573 * Clears the NMI-window exiting control in the VMCS.
4574 *
4575 * @param pVCpu The cross context virtual CPU structure.
4576 * @param pVmcsInfo The VMCS info. object.
4577 */
4578DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4579{
4580 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4581 {
4582 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4583 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4584 AssertRC(rc);
4585 Log4Func(("Disabled NMI-window exiting\n"));
4586 }
4587}
4588
4589
4590/**
4591 * Injects an event into the guest upon VM-entry by updating the relevant fields
4592 * in the VM-entry area in the VMCS.
4593 *
4594 * @returns Strict VBox status code (i.e. informational status codes too).
4595 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4596 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4597 *
4598 * @param pVCpu The cross context virtual CPU structure.
4599 * @param pVmcsInfo The VMCS info object.
4600 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
4601 * @param pEvent The event being injected.
4602 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4603 *                          will be updated if necessary. This cannot be NULL.
4604 * @param fStepping Whether we're single-stepping guest execution and should
4605 * return VINF_EM_DBG_STEPPED if the event is injected
4606 * directly (registers modified by us, not by hardware on
4607 * VM-entry).
4608 */
4609static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4610 bool fStepping, uint32_t *pfIntrState)
4611{
4612 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4613 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4614 Assert(pfIntrState);
4615
4616#ifdef IN_NEM_DARWIN
4617 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4618#endif
4619
4620 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4621 uint32_t u32IntInfo = pEvent->u64IntInfo;
4622 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4623 uint32_t const cbInstr = pEvent->cbInstr;
4624 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4625 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4626 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4627
4628#ifdef VBOX_STRICT
4629 /*
4630 * Validate the error-code-valid bit for hardware exceptions.
4631 * No error codes for exceptions in real-mode.
4632 *
4633 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4634 */
4635 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4636 && !CPUMIsGuestInRealModeEx(pCtx))
4637 {
4638 switch (uVector)
4639 {
4640 case X86_XCPT_PF:
4641 case X86_XCPT_DF:
4642 case X86_XCPT_TS:
4643 case X86_XCPT_NP:
4644 case X86_XCPT_SS:
4645 case X86_XCPT_GP:
4646 case X86_XCPT_AC:
4647 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4648 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4649 RT_FALL_THRU();
4650 default:
4651 break;
4652 }
4653 }
4654
4655 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4656 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4657 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4658#endif
4659
4660 RT_NOREF(uVector);
4661 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4662 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4663 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4664 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4665 {
4666 Assert(uVector <= X86_XCPT_LAST);
4667 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4668 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4669 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4670 }
4671 else
4672 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4673
4674 /*
4675 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4676 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4677 * interrupt handler in the (real-mode) guest.
4678 *
4679 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4680 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4681 */
4682 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4683 {
4684#ifndef IN_NEM_DARWIN
4685 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4686#endif
4687 {
4688 /*
4689 * For CPUs with unrestricted guest execution enabled and with the guest
4690 * in real-mode, we must not set the deliver-error-code bit.
4691 *
4692 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4693 */
4694 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4695 }
4696#ifndef IN_NEM_DARWIN
4697 else
4698 {
4699 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4700 Assert(PDMVmmDevHeapIsEnabled(pVM));
4701 Assert(pVM->hm.s.vmx.pRealModeTSS);
4702 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4703
4704 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4705 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4706 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4707 AssertRCReturn(rc2, rc2);
4708
4709 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
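            /* E.g. for vector 8 the IVT entry occupies bytes 32..35, so the IDT limit must be at least 35. */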
4710 size_t const cbIdtEntry = sizeof(X86IDTR16);
4711 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4712 {
4713 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4714 if (uVector == X86_XCPT_DF)
4715 return VINF_EM_RESET;
4716
4717 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4718 No error codes for exceptions in real-mode. */
4719 if (uVector == X86_XCPT_GP)
4720 {
4721 static HMEVENT const s_EventXcptDf
4722 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4723 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4724 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4725 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4726 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4727 }
4728
4729 /*
4730 * If we're injecting an event with no valid IDT entry, inject a #GP.
4731 * No error codes for exceptions in real-mode.
4732 *
4733 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4734 */
4735 static HMEVENT const s_EventXcptGp
4736 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4737 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4738 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4739 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4740 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4741 }
4742
4743 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4744 uint16_t uGuestIp = pCtx->ip;
4745 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4746 {
4747 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4748 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4749 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4750 }
4751 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4752 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4753
4754 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4755 X86IDTR16 IdtEntry;
4756 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4757 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4758 AssertRCReturn(rc2, rc2);
4759
4760 /* Construct the stack frame for the interrupt/exception handler. */
4761 VBOXSTRICTRC rcStrict;
4762 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4763 if (rcStrict == VINF_SUCCESS)
4764 {
4765 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4766 if (rcStrict == VINF_SUCCESS)
4767 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4768 }
4769
4770 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4771 if (rcStrict == VINF_SUCCESS)
4772 {
4773 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4774 pCtx->rip = IdtEntry.offSel;
4775 pCtx->cs.Sel = IdtEntry.uSel;
4776 pCtx->cs.ValidSel = IdtEntry.uSel;
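                /* Real-mode segment base = selector * 16; the shift below relies on cbIdtEntry being 4 (sizeof(X86IDTR16)). */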
4777 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4778 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4779 && uVector == X86_XCPT_PF)
4780 pCtx->cr2 = GCPtrFault;
4781
4782 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4783 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4784 | HM_CHANGED_GUEST_RSP);
4785
4786 /*
4787 * If we delivered a hardware exception (other than an NMI) and if there was
4788 * block-by-STI in effect, we should clear it.
4789 */
4790 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4791 {
4792 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4793 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4794 Log4Func(("Clearing inhibition due to STI\n"));
4795 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4796 }
4797
4798 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4799 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4800
4801 /*
4802 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4803 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4804 */
4805 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4806
4807 /*
4808 * If we eventually support nested-guest execution without unrestricted guest execution,
4809 * we should set fInterceptEvents here.
4810 */
4811 Assert(!fIsNestedGuest);
4812
4813 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4814 if (fStepping)
4815 rcStrict = VINF_EM_DBG_STEPPED;
4816 }
4817 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4818 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4819 return rcStrict;
4820 }
4821#else
4822 RT_NOREF(pVmcsInfo);
4823#endif
4824 }
4825
4826 /*
4827 * Validate.
4828 */
4829 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4830 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4831
4832 /*
4833 * Inject the event into the VMCS.
4834 */
4835 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4836 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4837 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4838 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4839 AssertRC(rc);
4840
4841 /*
4842 * Update guest CR2 if this is a page-fault.
4843 */
4844 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4845 pCtx->cr2 = GCPtrFault;
4846
4847 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4848 return VINF_SUCCESS;
4849}
4850
4851
4852/**
4853 * Evaluates the event to be delivered to the guest and sets it as the pending
4854 * event.
4855 *
4856 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4857 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4858 * NOT restore these force-flags.
4859 *
4860 * @returns Strict VBox status code (i.e. informational status codes too).
4861 * @param pVCpu The cross context virtual CPU structure.
4862 * @param pVmcsInfo The VMCS information structure.
4863 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4864 * state.
4865 */
4866static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4867{
4868 Assert(pfIntrState);
4869 Assert(!TRPMHasTrap(pVCpu));
4870
4871 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4872
4873 /*
4874 * Evaluate if a new event needs to be injected.
4875     * For an event that's already pending, all the necessary checks have already been performed.
4876 */
4877 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4878 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4879 {
4880 /** @todo SMI. SMIs take priority over NMIs. */
4881
4882 /*
4883 * NMIs.
4884 * NMIs take priority over external interrupts.
4885 */
4886 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4887 {
4888 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4889 {
4890 /* Finally, inject the NMI and we're done. */
4891 vmxHCSetPendingXcptNmi(pVCpu);
4892 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4893 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4894 return VINF_SUCCESS;
4895 }
4896 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4897 }
4898 else
4899 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4900
4901 /*
4902 * External interrupts (PIC/APIC).
4903 */
4904 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4905 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4906 {
4907 Assert(!DBGFIsStepping(pVCpu));
4908 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4909 AssertRC(rc);
4910
4911 if (pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF)
4912 {
4913 /*
4914 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it.
4915 * We cannot re-request the interrupt from the controller again.
4916 */
4917 uint8_t u8Interrupt;
4918 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4919 if (RT_SUCCESS(rc))
4920 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4921 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4922 {
4923 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
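                    /* Program the TPR threshold with the priority class (bits 7:4) of the masked interrupt so that
                       we get a TPR-below-threshold VM-exit once the guest lowers its TPR far enough to accept it. */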
4924 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4925 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4926 /*
4927 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4928 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4929 * need to re-set this force-flag here.
4930 */
4931 }
4932 else
4933 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4934
4935 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4936 return VINF_SUCCESS;
4937 }
4938 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4939 }
4940 else
4941 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4942 }
4943 else
4944 {
4945 /*
4946 * An event is being injected or we are in an interrupt shadow.
4947 * If another event is pending currently, instruct VT-x to cause a VM-exit as
4948 * soon as the guest is ready to accept it.
4949 */
4950 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4951 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4952 else
4953 {
4954 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT));
4955 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4956 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4957 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4958 else
4959 {
4960 /* It's possible that interrupt-window exiting is still active, clear it as it's now unnecessary. */
4961 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
4962 }
4963 }
4964 }
4965
4966 return VINF_SUCCESS;
4967}
4968
4969
4970#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4971/**
4972 * Evaluates the event to be delivered to the nested-guest and sets it as the
4973 * pending event.
4974 *
4975 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4976 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4977 * NOT restore these force-flags.
4978 *
4979 * @returns Strict VBox status code (i.e. informational status codes too).
4980 * @param pVCpu The cross context virtual CPU structure.
4981 * @param pVmcsInfo The VMCS information structure.
4982 * @param pfIntrState Where to store the updated VMX guest-interruptibility
4983 * state.
4984 *
4985 * @remarks The guest must be in VMX non-root mode.
4986 */
4987static VBOXSTRICTRC vmxHCEvaluatePendingEventNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
4988{
4989 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4990
4991 Assert(pfIntrState);
4992 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
4993 Assert(!TRPMHasTrap(pVCpu));
4994
4995 *pfIntrState = vmxHCGetGuestIntrStateWithUpdate(pVCpu);
4996
4997 /*
4998 * If we are injecting an event, all necessary checks have been performed.
4999     * Any interrupt-window or NMI-window exiting would already have been set up
5000     * from the nested-guest's controls while we merged controls.
5001 */
5002 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5003 return VINF_SUCCESS;
5004
5005 /*
5006 * An event injected by VMLAUNCH/VMRESUME instruction emulation should've been
5007 * made pending (TRPM to HM event) and would be handled above if we resumed
5008 * execution in HM. If somehow we fell back to emulation after the
5009 * VMLAUNCH/VMRESUME instruction, it would have been handled in iemRaiseXcptOrInt
5010 * (calling iemVmxVmexitEvent). Thus, if we get here the nested-hypervisor's VMX
5011 * intercepts should be active and any events pending here have been generated
5012 * while executing the guest in VMX non-root mode after virtual VM-entry completed.
5013 */
5014 Assert(CPUMIsGuestVmxInterceptEvents(pCtx));
5015
5016 /*
5017 * Interrupt shadows MAY block NMIs.
5018     * They also block external interrupts and MAY block external-interrupt VM-exits.
5019 *
5020 * See Intel spec. 24.4.2 "Guest Non-Register State".
5021 * See Intel spec. 25.4.1 "Event Blocking".
5022 */
5023 if (!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
5024 { /* likely */ }
5025 else
5026 return VINF_SUCCESS;
5027
5028 /** @todo SMI. SMIs take priority over NMIs. */
5029
5030 /*
5031 * NMIs.
5032 * NMIs take priority over interrupts.
5033 */
5034 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
5035 {
5036 /*
5037 * Nested-guest NMI-window exiting.
5038 * The NMI-window exit must happen regardless of whether an NMI is pending
5039 * provided virtual-NMI blocking is not in effect.
5040 *
5041 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5042 */
5043 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
5044 && !CPUMIsGuestVmxVirtNmiBlocking(pCtx))
5045 {
5046 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
5047 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
5048 }
5049
5050 /*
5051 * For a nested-guest, the FF always indicates the outer guest's ability to
5052 * receive an NMI while the guest-interruptibility state bit depends on whether
5053 * the nested-hypervisor is using virtual-NMIs.
5054 *
5055 * It is very important that we also clear the force-flag if we are causing
5056 * an NMI VM-exit as it is the responsibility of the nested-hypervisor to deal
5057 * with re-injecting or discarding the NMI. This fixes the bug that showed up
5058 * with SMP Windows Server 2008 R2 with Hyper-V enabled, see @bugref{10318#c19}.
5059 */
5060 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
5061 {
5062 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
5063 return IEMExecVmxVmexitXcptNmi(pVCpu);
5064 vmxHCSetPendingXcptNmi(pVCpu);
5065 return VINF_SUCCESS;
5066 }
5067 }
5068
5069 /*
5070 * Nested-guest interrupt-window exiting.
5071 *
5072 * We must cause the interrupt-window exit regardless of whether an interrupt is pending
5073 * provided virtual interrupts are enabled.
5074 *
5075 * See Intel spec. 25.2 "Other Causes Of VM Exits".
5076 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5077 */
5078 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
5079 && CPUMIsGuestVmxVirtIntrEnabled(pCtx))
5080 {
5081 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
5082 return IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
5083 }
5084
5085 /*
5086 * External interrupts (PIC/APIC).
5087 *
5088 * When "External interrupt exiting" is set the VM-exit happens regardless of RFLAGS.IF.
5089 * When it isn't set, RFLAGS.IF controls delivery of the interrupt as always.
5090 * This fixes a nasty SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued
5091 * by other VM-exits (like a preemption timer), see @bugref{9562#c18}.
5092 *
5093 * NMIs block external interrupts as they are dispatched through the interrupt gate (vector 2)
5094 * which automatically clears EFLAGS.IF. Also it's possible an NMI handler could enable interrupts
5095 * and thus we should not check for NMI inhibition here.
5096 *
5097 * See Intel spec. 25.4.1 "Event Blocking".
5098 * See Intel spec. 6.8.1 "Masking Maskable Hardware Interrupts".
5099 */
5100 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
5101 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5102 {
5103 Assert(!DBGFIsStepping(pVCpu));
5104 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
5105 AssertRC(rc);
5106 if (CPUMIsGuestVmxPhysIntrEnabled(pCtx))
5107 {
5108 /* Nested-guest external interrupt VM-exit. */
5109 if ( CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
5110 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
5111 {
5112 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
5113 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5114 return rcStrict;
5115 }
5116
5117 /*
5118 * Fetch the external interrupt from the interrupt controller.
5119 * Once PDMGetInterrupt() returns an interrupt we -must- deliver it or pass it to
5120 * the nested-hypervisor. We cannot re-request the interrupt from the controller again.
5121 */
5122 uint8_t u8Interrupt;
5123 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5124 if (RT_SUCCESS(rc))
5125 {
5126 /* Nested-guest external interrupt VM-exit when the "acknowledge interrupt on exit" is enabled. */
5127 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
5128 {
5129 Assert(CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT));
5130 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
5131 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5132 return rcStrict;
5133 }
5134 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
5135 return VINF_SUCCESS;
5136 }
5137 }
5138 }
5139 return VINF_SUCCESS;
5140}
5141#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5142
5143
5144/**
5145 * Injects any pending events into the guest if the guest is in a state to
5146 * receive them.
5147 *
5148 * @returns Strict VBox status code (i.e. informational status codes too).
5149 * @param pVCpu The cross context virtual CPU structure.
5150 * @param pVmcsInfo The VMCS information structure.
5151 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5152 * @param fIntrState The VT-x guest-interruptibility state.
5153 * @param fStepping Whether we are single-stepping the guest using the
5154 * hypervisor debugger and should return
5155 * VINF_EM_DBG_STEPPED if the event was dispatched
5156 * directly.
5157 */
5158static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5159 uint32_t fIntrState, bool fStepping)
5160{
5161 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5162#ifndef IN_NEM_DARWIN
5163 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5164#endif
5165
5166#ifdef VBOX_STRICT
5167 /*
5168 * Verify guest-interruptibility state.
5169 *
5170 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5171 * since injecting an event may modify the interruptibility state and we must thus always
5172 * use fIntrState.
5173 */
5174 {
5175 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5176 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5177 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5178 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5179 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5180 Assert(!TRPMHasTrap(pVCpu));
5181 NOREF(fBlockMovSS); NOREF(fBlockSti);
5182 }
5183#endif
5184
5185 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5186 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5187 {
5188 /*
5189 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5190 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5191 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5192 *
5193 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5194 */
5195 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5196#ifdef VBOX_STRICT
5197 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5198 {
5199 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5200 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5201 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5202 }
5203 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5204 {
5205 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5206 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5207 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5208 }
5209#endif
5210 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5211 uIntType));
5212
5213 /*
5214 * Inject the event and get any changes to the guest-interruptibility state.
5215 *
5216         * The guest-interruptibility state may need to be updated if we dispatch the event
5217         * through the guest IDT ourselves (real-on-v86 guests injecting software interrupts).
5218 */
5219 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5220 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5221
5222 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5223 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5224 else
5225 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5226 }
5227
5228 /*
5229 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5230 * is an interrupt shadow (block-by-STI or block-by-MOV SS).
5231 */
5232 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5233 && !fIsNestedGuest)
5234 {
5235 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5236
5237 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5238 {
5239 /*
5240 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5241 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5242 */
5243 Assert(!DBGFIsStepping(pVCpu));
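            /* BS (single-step) is bit 14 of the guest pending debug exceptions field; if set, the pending #DB is
               delivered after VM-entry, before further guest code runs (subject to the usual #DB priority rules). */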
5244 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5245 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5246 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5247 AssertRC(rc);
5248 }
5249 else
5250 {
5251 /*
5252 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5253 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5254 * we take care of this case in vmxHCExportSharedDebugState and also the case if
5255 * we use MTF, so just make sure it's called before executing guest-code.
5256 */
5257 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5258 }
5259 }
5260    /* else: for nested-guests this is currently handled while merging controls. */
5261
5262 /*
5263 * Finally, update the guest-interruptibility state.
5264 *
5265 * This is required for the real-on-v86 software interrupt injection, for
5266 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5267 */
5268 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5269 AssertRC(rc);
5270
5271 /*
5272 * There's no need to clear the VM-entry interruption-information field here if we're not
5273 * injecting anything. VT-x clears the valid bit on every VM-exit.
5274 *
5275 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5276 */
5277
5278 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5279 return rcStrict;
5280}
5281
5282
5283/**
5284 * Tries to determine what part of the guest-state VT-x has deemed as invalid
5285 * and update error record fields accordingly.
5286 *
5287 * @returns VMX_IGS_* error codes.
5288 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5289 * wrong with the guest state.
5290 *
5291 * @param pVCpu The cross context virtual CPU structure.
5292 * @param pVmcsInfo The VMCS info. object.
5293 *
5294 * @remarks This function assumes our cache of the VMCS controls
5295 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5296 */
5297static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5298{
5299#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5300#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
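/* Note: the trailing 'else do { } while (0)' consumes the caller's semicolon and keeps the 'if' from pairing with a following 'else'. */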
5301
5302 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5303 uint32_t uError = VMX_IGS_ERROR;
5304 uint32_t u32IntrState = 0;
5305#ifndef IN_NEM_DARWIN
5306 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5307 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5308#else
5309 bool const fUnrestrictedGuest = true;
5310#endif
5311 do
5312 {
5313 int rc;
5314
5315 /*
5316 * Guest-interruptibility state.
5317 *
5318     * Read this first so that checks which fail before the ones that actually
5319     * require the guest-interruptibility state still reflect the correct
5320     * VMCS value, avoiding further confusion.
5321 */
5322 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5323 AssertRC(rc);
5324
5325 uint32_t u32Val;
5326 uint64_t u64Val;
5327
5328 /*
5329 * CR0.
5330 */
5331 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
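        /* A CR0 bit set in both FIXED0 and FIXED1 must be 1 in the guest (fSetCr0); a bit clear in both must be 0,
           i.e. only bits within fZapCr0 may be set. See Intel spec. A.7 "VMX-Fixed Bits in CR0". */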
5332 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5333 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
5334 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5335 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5336 if (fUnrestrictedGuest)
5337 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5338
5339 uint64_t u64GuestCr0;
5340 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5341 AssertRC(rc);
5342 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5343 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5344 if ( !fUnrestrictedGuest
5345 && (u64GuestCr0 & X86_CR0_PG)
5346 && !(u64GuestCr0 & X86_CR0_PE))
5347 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5348
5349 /*
5350 * CR4.
5351 */
5352 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5353 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5354 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5355
5356 uint64_t u64GuestCr4;
5357 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5358 AssertRC(rc);
5359 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5360 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5361
5362 /*
5363 * IA32_DEBUGCTL MSR.
5364 */
5365 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5366 AssertRC(rc);
5367 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5368 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5369 {
5370 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5371 }
5372 uint64_t u64DebugCtlMsr = u64Val;
5373
5374#ifdef VBOX_STRICT
5375 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5376 AssertRC(rc);
5377 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5378#endif
5379 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5380
5381 /*
5382 * RIP and RFLAGS.
5383 */
5384 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5385 AssertRC(rc);
5386 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5387 if ( !fLongModeGuest
5388 || !pCtx->cs.Attr.n.u1Long)
5389 {
5390 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5391 }
5392 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5393 * must be identical if the "IA-32e mode guest" VM-entry
5394 * control is 1 and CS.L is 1. No check applies if the
5395 * CPU supports 64 linear-address bits. */
5396
5397 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5398 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5399 AssertRC(rc);
5400 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
5401 VMX_IGS_RFLAGS_RESERVED);
5402 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5403 uint32_t const u32Eflags = u64Val;
5404
5405 if ( fLongModeGuest
5406 || ( fUnrestrictedGuest
5407 && !(u64GuestCr0 & X86_CR0_PE)))
5408 {
5409 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5410 }
5411
5412 uint32_t u32EntryInfo;
5413 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5414 AssertRC(rc);
5415 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5416 {
5417 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5418 }
5419
5420 /*
5421 * 64-bit checks.
5422 */
5423 if (fLongModeGuest)
5424 {
5425 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5426 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5427 }
5428
5429 if ( !fLongModeGuest
5430 && (u64GuestCr4 & X86_CR4_PCIDE))
5431 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5432
5433 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5434 * 51:32 beyond the processor's physical-address width are 0. */
5435
5436 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5437 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5438 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5439
5440#ifndef IN_NEM_DARWIN
5441 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5442 AssertRC(rc);
5443 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5444
5445 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5446 AssertRC(rc);
5447 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5448#endif
5449
5450 /*
5451 * PERF_GLOBAL MSR.
5452 */
5453 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5454 {
5455 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5456 AssertRC(rc);
5457 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5458 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5459 }
5460
5461 /*
5462 * PAT MSR.
5463 */
5464 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5465 {
5466 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5467 AssertRC(rc);
5468            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
5469 for (unsigned i = 0; i < 8; i++)
5470 {
5471 uint8_t u8Val = (u64Val & 0xff);
5472 if ( u8Val != 0 /* UC */
5473 && u8Val != 1 /* WC */
5474 && u8Val != 4 /* WT */
5475 && u8Val != 5 /* WP */
5476 && u8Val != 6 /* WB */
5477 && u8Val != 7 /* UC- */)
5478 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5479 u64Val >>= 8;
5480 }
5481 }
5482
5483 /*
5484 * EFER MSR.
5485 */
5486 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5487 {
5488 Assert(g_fHmVmxSupportsVmcsEfer);
5489 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5490 AssertRC(rc);
5491 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5492 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5493 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5494 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5495 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5496 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5497 * iemVmxVmentryCheckGuestState(). */
5498 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5499 || !(u64GuestCr0 & X86_CR0_PG)
5500 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5501 VMX_IGS_EFER_LMA_LME_MISMATCH);
5502 }
5503
5504 /*
5505 * Segment registers.
5506 */
5507 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5508 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5509 if (!(u32Eflags & X86_EFL_VM))
5510 {
5511 /* CS */
5512 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5513 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5514 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
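            /* Limit/granularity consistency: if G=1 the low 12 limit bits must be all ones, and if any of limit
               bits 31:20 is set then G must be 1. The same rule is checked for SS, DS, ES, FS, GS and TR below. */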
5515 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5516 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5517 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5518 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5519 /* CS cannot be loaded with NULL in protected mode. */
5520 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5521 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5522 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5523 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5524 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5525 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5526 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5527 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5528 else
5529 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5530
5531 /* SS */
5532 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5533 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5534 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5535 if ( !(pCtx->cr0 & X86_CR0_PE)
5536 || pCtx->cs.Attr.n.u4Type == 3)
5537 {
5538 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5539 }
5540
5541 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5542 {
5543 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5544 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5545 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5546 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5547 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5548 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5549 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5550 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5551 }
5552
5553 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5554 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5555 {
5556 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5557 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5558 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5559 || pCtx->ds.Attr.n.u4Type > 11
5560 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5561 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5562 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5563 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5564 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5565 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5566 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5567 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5568 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5569 }
5570 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5571 {
5572 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5573 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5574 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5575 || pCtx->es.Attr.n.u4Type > 11
5576 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5577 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5578 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5579 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5580 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5581 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5582 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5583 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5584 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5585 }
5586 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5587 {
5588 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5589 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5590 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5591 || pCtx->fs.Attr.n.u4Type > 11
5592 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5593 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5594 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5595 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5596 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5597 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5598 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5599 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5600 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5601 }
5602 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5603 {
5604 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5605 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5606 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5607 || pCtx->gs.Attr.n.u4Type > 11
5608 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5609 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5610 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5611 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5612 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5613 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5614 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5615 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5616 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5617 }
5618 /* 64-bit capable CPUs. */
5619 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5620 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5621 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5622 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5623 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5624 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5625 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5626 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5627 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5628 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5629 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5630 }
5631 else
5632 {
5633 /* V86 mode checks. */
5634 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5635 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5636 {
5637 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5638 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5639 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5640 }
5641 else
5642 {
5643 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5644 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5645 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5646 }
5647
5648 /* CS */
5649 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5650 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5651 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5652 /* SS */
5653 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5654 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5655 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5656 /* DS */
5657 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5658 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5659 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5660 /* ES */
5661 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5662 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5663 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5664 /* FS */
5665 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5666 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5667 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5668 /* GS */
5669 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5670 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5671 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5672 /* 64-bit capable CPUs. */
5673 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5674 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5675 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5676 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5677 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5678 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5679 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5680 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5681 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5682 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5683 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5684 }
5685
5686 /*
5687 * TR.
5688 */
5689 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5690 /* 64-bit capable CPUs. */
5691 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5692 if (fLongModeGuest)
5693 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5694 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5695 else
5696 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5697 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5698 VMX_IGS_TR_ATTR_TYPE_INVALID);
5699 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5700 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5701 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5702 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5703 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5704 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5705 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5706 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5707
5708 /*
5709 * GDTR and IDTR (64-bit capable checks).
5710 */
5711 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5712 AssertRC(rc);
5713 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5714
5715 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5716 AssertRC(rc);
5717 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5718
5719 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5720 AssertRC(rc);
5721 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5722
5723 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5724 AssertRC(rc);
5725 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5726
5727 /*
5728 * Guest Non-Register State.
5729 */
5730 /* Activity State. */
5731 uint32_t u32ActivityState;
5732 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5733 AssertRC(rc);
5734 HMVMX_CHECK_BREAK( !u32ActivityState
5735 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5736 VMX_IGS_ACTIVITY_STATE_INVALID);
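        /* The HLT activity state is only valid when SS.DPL (i.e. CPL) is 0. */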
5737 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5738 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5739
5740 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5741 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5742 {
5743 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5744 }
5745
5746 /** @todo Activity state and injecting interrupts. Left as a todo since we
5747 * currently don't use any activity state other than ACTIVE. */
5748
5749 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5750 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5751
5752 /* Guest interruptibility-state. */
5753 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5754 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5755 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5756 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5757 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5758 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5759 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5760 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5761 {
5762 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5763 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5764 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5765 }
5766 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5767 {
5768 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5769 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5770 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5771 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5772 }
5773 /** @todo Assumes the processor is not in SMM. */
5774 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5775 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5776 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5777 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5778 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5779 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5780 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5781 {
5782 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5783 }
5784
5785 /* Pending debug exceptions. */
5786 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5787 AssertRC(rc);
5788 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5789 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5790 u32Val = u64Val; /* For pending debug exceptions checks below. */
5791
5792 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5793 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5794 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5795 {
5796 if ( (u32Eflags & X86_EFL_TF)
5797 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5798 {
5799 /* Bit 14 is PendingDebug.BS. */
5800 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5801 }
5802 if ( !(u32Eflags & X86_EFL_TF)
5803 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5804 {
5805 /* Bit 14 is PendingDebug.BS. */
5806 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5807 }
5808 }
5809
5810#ifndef IN_NEM_DARWIN
5811 /* VMCS link pointer. */
5812 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5813 AssertRC(rc);
5814 if (u64Val != UINT64_C(0xffffffffffffffff))
5815 {
5816 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5817 /** @todo Bits beyond the processor's physical-address width MBZ. */
5818 /** @todo SMM checks. */
5819 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5820 Assert(pVmcsInfo->pvShadowVmcs);
5821 VMXVMCSREVID VmcsRevId;
5822 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5823 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5824 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5825 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5826 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5827 }
5828
5829 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5830 * not using nested paging? */
5831 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5832 && !fLongModeGuest
5833 && CPUMIsGuestInPAEModeEx(pCtx))
5834 {
5835 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5836 AssertRC(rc);
5837 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5838
5839 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5840 AssertRC(rc);
5841 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5842
5843 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5844 AssertRC(rc);
5845 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5846
5847 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5848 AssertRC(rc);
5849 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5850 }
5851#endif
5852
5853 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5854 if (uError == VMX_IGS_ERROR)
5855 uError = VMX_IGS_REASON_NOT_FOUND;
5856 } while (0);
5857
5858 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5859 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5860 return uError;
5861
5862#undef HMVMX_ERROR_BREAK
5863#undef HMVMX_CHECK_BREAK
5864}
5865
5866
5867#ifndef HMVMX_USE_FUNCTION_TABLE
5868/**
5869 * Handles a guest VM-exit from hardware-assisted VMX execution.
5870 *
5871 * @returns Strict VBox status code (i.e. informational status codes too).
5872 * @param pVCpu The cross context virtual CPU structure.
5873 * @param pVmxTransient The VMX-transient structure.
5874 */
5875DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5876{
5877#ifdef DEBUG_ramshankar
5878# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5879 do { \
5880 if (a_fSave != 0) \
5881 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5882 VBOXSTRICTRC rcStrict = a_CallExpr; \
5883 if (a_fSave != 0) \
5884 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5885 return rcStrict; \
5886 } while (0)
5887#else
5888# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5889#endif
5890 uint32_t const uExitReason = pVmxTransient->uExitReason;
5891 switch (uExitReason)
5892 {
5893 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5894 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5895 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5896 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5897 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5898 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5899 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5900 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5901 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5902 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5903 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5904 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5905 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5906 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5907 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5908 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5909 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5910 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5911 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5912 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5913 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5914 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5915 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5916 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5917 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5918 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5919 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5920 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5921 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5922 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5923#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5924 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5925 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5926 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5927 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5928 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5929 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5930 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5931 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5932 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5933 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5934#else
5935 case VMX_EXIT_VMCLEAR:
5936 case VMX_EXIT_VMLAUNCH:
5937 case VMX_EXIT_VMPTRLD:
5938 case VMX_EXIT_VMPTRST:
5939 case VMX_EXIT_VMREAD:
5940 case VMX_EXIT_VMRESUME:
5941 case VMX_EXIT_VMWRITE:
5942 case VMX_EXIT_VMXOFF:
5943 case VMX_EXIT_VMXON:
5944 case VMX_EXIT_INVVPID:
5945 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5946#endif
5947#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5948 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5949#else
5950 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5951#endif
5952
5953 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5954 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5955 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5956
5957 case VMX_EXIT_INIT_SIGNAL:
5958 case VMX_EXIT_SIPI:
5959 case VMX_EXIT_IO_SMI:
5960 case VMX_EXIT_SMI:
5961 case VMX_EXIT_ERR_MSR_LOAD:
5962 case VMX_EXIT_ERR_MACHINE_CHECK:
5963 case VMX_EXIT_PML_FULL:
5964 case VMX_EXIT_VIRTUALIZED_EOI:
5965 case VMX_EXIT_GDTR_IDTR_ACCESS:
5966 case VMX_EXIT_LDTR_TR_ACCESS:
5967 case VMX_EXIT_APIC_WRITE:
5968 case VMX_EXIT_RDRAND:
5969 case VMX_EXIT_RSM:
5970 case VMX_EXIT_VMFUNC:
5971 case VMX_EXIT_ENCLS:
5972 case VMX_EXIT_RDSEED:
5973 case VMX_EXIT_XSAVES:
5974 case VMX_EXIT_XRSTORS:
5975 case VMX_EXIT_UMWAIT:
5976 case VMX_EXIT_TPAUSE:
5977 case VMX_EXIT_LOADIWKEY:
5978 default:
5979 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5980 }
5981#undef VMEXIT_CALL_RET
5982}
5983#endif /* !HMVMX_USE_FUNCTION_TABLE */
5984
5985
5986#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5987/**
5988 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5989 *
5990 * @returns Strict VBox status code (i.e. informational status codes too).
5991 * @param pVCpu The cross context virtual CPU structure.
5992 * @param pVmxTransient The VMX-transient structure.
5993 */
5994DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5995{
5996#ifdef DEBUG_ramshankar
5997# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5998 do { \
5999 if (a_fSave != 0) \
6000 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
6001 VBOXSTRICTRC rcStrict = a_CallExpr; \
6002 return rcStrict; \
6003 } while (0)
6004#else
6005# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
6006#endif
6007
6008 uint32_t const uExitReason = pVmxTransient->uExitReason;
6009 switch (uExitReason)
6010 {
6011# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6012 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
6013 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
6014# else
6015 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
6016 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
6017# endif
6018 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
6019 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
6020 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
6021
6022 /*
6023 * We shouldn't direct host physical interrupts to the nested-guest.
6024 */
6025 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
6026
6027 /*
6028 * Instructions that cause VM-exits unconditionally or for which the exit
6029 * condition is determined solely by the nested hypervisor (meaning if the
6030 * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
6031 *
6032 * - Provides VM-exit instruction length ONLY.
6033 */
6034 case VMX_EXIT_CPUID: /* Unconditional. */
6035 case VMX_EXIT_VMCALL:
6036 case VMX_EXIT_GETSEC:
6037 case VMX_EXIT_INVD:
6038 case VMX_EXIT_XSETBV:
6039 case VMX_EXIT_VMLAUNCH:
6040 case VMX_EXIT_VMRESUME:
6041 case VMX_EXIT_VMXOFF:
6042 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
6043 case VMX_EXIT_VMFUNC:
6044 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
6045
6046 /*
6047 * Instructions that cause VM-exits unconditionally or for which the exit
6048 * condition is determined solely by the nested hypervisor (meaning if the
6049 * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
6050 *
6051 * - Provides VM-exit instruction length.
6052 * - Provides VM-exit information.
6053 * - Optionally provides Exit qualification.
6054 *
6055 * Since Exit qualification is 0 for all VM-exits where it is not
6056 * applicable, reading and passing it to the guest should produce
6057 * defined behavior.
6058 *
6059 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
6060 */
6061 case VMX_EXIT_INVEPT: /* Unconditional. */
6062 case VMX_EXIT_INVVPID:
6063 case VMX_EXIT_VMCLEAR:
6064 case VMX_EXIT_VMPTRLD:
6065 case VMX_EXIT_VMPTRST:
6066 case VMX_EXIT_VMXON:
6067 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
6068 case VMX_EXIT_LDTR_TR_ACCESS:
6069 case VMX_EXIT_RDRAND:
6070 case VMX_EXIT_RDSEED:
6071 case VMX_EXIT_XSAVES:
6072 case VMX_EXIT_XRSTORS:
6073 case VMX_EXIT_UMWAIT:
6074 case VMX_EXIT_TPAUSE:
6075 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
6076
6077 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
6078 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
6079 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
6080 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
6081 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
6082 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
6083 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
6084 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
6085 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
6086 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
6087 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
6088 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
6089 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
6090 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
6091 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
6092 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
6093 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
6094 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
6095 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
6096
6097 case VMX_EXIT_PREEMPT_TIMER:
6098 {
6099 /** @todo NSTVMX: Preempt timer. */
6100 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
6101 }
6102
6103 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
6104 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
6105
6106 case VMX_EXIT_VMREAD:
6107 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
6108
6109 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
6110 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
6111
6112 case VMX_EXIT_INIT_SIGNAL:
6113 case VMX_EXIT_SIPI:
6114 case VMX_EXIT_IO_SMI:
6115 case VMX_EXIT_SMI:
6116 case VMX_EXIT_ERR_MSR_LOAD:
6117 case VMX_EXIT_ERR_MACHINE_CHECK:
6118 case VMX_EXIT_PML_FULL:
6119 case VMX_EXIT_RSM:
6120 default:
6121 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
6122 }
6123#undef VMEXIT_CALL_RET
6124}
6125#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6126
6127
6128/** @name VM-exit helpers.
6129 * @{
6130 */
6131/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6132/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6133/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6134
6135/** Macro for VM-exits called unexpectedly. */
6136#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
6137 do { \
6138 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
6139 return VERR_VMX_UNEXPECTED_EXIT; \
6140 } while (0)
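/*
 * Usage sketch (illustrative only; the actual call sites are in the individual VM-exit
 * handlers further down in this file). The macro expands to a statement that records the
 * HM error and returns, so a handler that cannot legitimately be reached simply does:
 *     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
 */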
6141
6142#ifdef VBOX_STRICT
6143# ifndef IN_NEM_DARWIN
6144 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
6145# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
6146 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6147
6148# define HMVMX_ASSERT_PREEMPT_CPUID() \
6149 do { \
6150 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6151 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6152 } while (0)
6153
6154# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6155 do { \
6156 AssertPtr((a_pVCpu)); \
6157 AssertPtr((a_pVmxTransient)); \
6158 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6159 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6160 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6161 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6162 Assert((a_pVmxTransient)->pVmcsInfo); \
6163 Assert(ASMIntAreEnabled()); \
6164 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6165 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6166 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6167 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6168 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6169 HMVMX_ASSERT_PREEMPT_CPUID(); \
6170 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6171 } while (0)
6172# else
6173# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6174# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6175# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6176 do { \
6177 AssertPtr((a_pVCpu)); \
6178 AssertPtr((a_pVmxTransient)); \
6179 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6180 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6181 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6182 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6183 Assert((a_pVmxTransient)->pVmcsInfo); \
6184 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6185 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6186 } while (0)
6187# endif
6188
6189# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6190 do { \
6191 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6192 Assert((a_pVmxTransient)->fIsNestedGuest); \
6193 } while (0)
6194
6195# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6196 do { \
6197 Log4Func(("\n")); \
6198 } while (0)
6199#else
6200# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6201 do { \
6202 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6203 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6204 } while (0)
6205
6206# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6207 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6208
6209# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6210#endif
6211
6212#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6213 /** Macro that performs the necessary privilege checks for VM-exits caused by a
6214 * guest attempting to execute a VMX instruction. */
6215# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6216 do \
6217 { \
6218 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6219 if (rcStrictTmp == VINF_SUCCESS) \
6220 { /* likely */ } \
6221 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6222 { \
6223 Assert((a_pVCpu)->hm.s.Event.fPending); \
6224 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6225 return VINF_SUCCESS; \
6226 } \
6227 else \
6228 { \
6229 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6230 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6231 } \
6232 } while (0)
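/*
 * Usage sketch (illustrative only; the actual call sites are the VMX-instruction VM-exit
 * handlers later in this file). Invoked at the top of a handler, the macro falls through
 * on success, returns VINF_SUCCESS after a #UD has been queued for the guest when the
 * privilege checks fail, or propagates any unexpected status:
 *     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
 */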
6233
6234 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6235# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6236 do \
6237 { \
6238 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6239 (a_pGCPtrEffAddr)); \
6240 if (rcStrictTmp == VINF_SUCCESS) \
6241 { /* likely */ } \
6242 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6243 { \
6244 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6245 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6246 NOREF(uXcptTmp); \
6247 return VINF_SUCCESS; \
6248 } \
6249 else \
6250 { \
6251 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6252 return rcStrictTmp; \
6253 } \
6254 } while (0)
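/*
 * Usage sketch (illustrative only; the variable names here are hypothetical). A handler
 * that needs the guest-linear address of a VMX instruction's memory operand would do
 * roughly:
 *     RTGCPTR GCPtrVmcs;
 *     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
 *                              VMXMEMACCESS_READ, &GCPtrVmcs);
 * On success GCPtrVmcs holds the decoded effective address; otherwise the macro has
 * already queued the appropriate exception or returned the failure status.
 */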
6255#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6256
6257
6258/**
6259 * Advances the guest RIP by the specified number of bytes.
6260 *
6261 * @param pVCpu The cross context virtual CPU structure.
6262 * @param cbInstr Number of bytes to advance the RIP by.
6263 *
6264 * @remarks No-long-jump zone!!!
6265 */
6266DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6267{
6268 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6269
6270 /*
6271 * Advance RIP.
6272 *
6273 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6274 * when the addition causes a "carry" into the upper half and check whether
6275 * we're in 64-bit mode and can go on with it or whether we should zap the top
6276 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6277 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6278 *
6279 * See PC wrap around tests in bs3-cpu-weird-1.
6280 */
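    /* For instance, a 2-byte instruction at RIP=0xfffffffe in 32-bit code gives
       uRipNext=0x100000000; the XOR test below catches the carry into bit 32 and the
       result is truncated back to a 32-bit RIP of 0. */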
6281 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6282 uint64_t const uRipNext = uRipPrev + cbInstr;
6283 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6284 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6285 pVCpu->cpum.GstCtx.rip = uRipNext;
6286 else
6287 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
6288
6289 /*
6290 * Clear RF and interrupt shadowing.
6291 */
6292 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6293 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6294 else
6295 {
6296 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6297 {
6298 /** @todo \#DB - single step. */
6299 }
6300 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6301 }
6302 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6303
6304 /* Mark both RIP and RFLAGS as updated. */
6305 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6306}
6307
6308
6309/**
6310 * Advances the guest RIP after reading it from the VMCS.
6311 *
6312 * @returns VBox status code, no informational status codes.
6313 * @param pVCpu The cross context virtual CPU structure.
6314 * @param pVmxTransient The VMX-transient structure.
6315 *
6316 * @remarks No-long-jump zone!!!
6317 */
6318static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6319{
6320 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6321 /** @todo consider template here after checking callers. */
6322 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6323 AssertRCReturn(rc, rc);
6324
6325 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6326 return VINF_SUCCESS;
6327}
6328
6329
6330/**
6331 * Handle a condition that occurred while delivering an event through the guest or
6332 * nested-guest IDT.
6333 *
6334 * @returns Strict VBox status code (i.e. informational status codes too).
6335 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6336 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6337 * to continue execution of the guest which will deliver the \#DF.
6338 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6339 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6340 *
6341 * @param pVCpu The cross context virtual CPU structure.
6342 * @param pVmxTransient The VMX-transient structure.
6343 *
6344 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6345 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6346 * is due to an EPT violation, PML full or SPP-related event.
6347 *
6348 * @remarks No-long-jump zone!!!
6349 */
6350static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6351{
6352 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6353 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6354 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6355 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6356 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6357 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6358
6359 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6360 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6361 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6362 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6363 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6364 {
6365 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6366 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6367
6368 /*
6369 * If the event was a software interrupt (generated with INT n) or a software exception
6370 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6371 * can handle the VM-exit and continue guest execution which will re-execute the
6372 * instruction rather than re-injecting the exception, as that can cause premature
6373 * trips to ring-3 before injection and involve TRPM which currently has no way of
6374 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6375 * the problem).
6376 */
6377 IEMXCPTRAISE enmRaise;
6378 IEMXCPTRAISEINFO fRaiseInfo;
6379 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6380 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6381 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6382 {
6383 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6384 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6385 }
6386 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6387 {
6388 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6389 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6390 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6391
6392 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6393 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6394
6395 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6396
6397 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6398 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6399 {
6400 pVmxTransient->fVectoringPF = true;
6401 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6402 }
6403 }
6404 else
6405 {
6406 /*
6407 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6408 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6409 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6410 */
6411 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6412 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6413 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6414 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6415 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6416 }
6417
6418 /*
6419 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6420 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6421 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6422 * subsequent VM-entry would fail, see @bugref{7445}.
6423 *
6424 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6425 */
6426 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6427 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6428 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6429 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6430 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6431
6432 switch (enmRaise)
6433 {
6434 case IEMXCPTRAISE_CURRENT_XCPT:
6435 {
6436 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6437 Assert(rcStrict == VINF_SUCCESS);
6438 break;
6439 }
6440
6441 case IEMXCPTRAISE_PREV_EVENT:
6442 {
6443 uint32_t u32ErrCode;
6444 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6445 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6446 else
6447 u32ErrCode = 0;
6448
6449 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6450 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6451 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6452 pVCpu->cpum.GstCtx.cr2);
6453
6454 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6455 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6456 Assert(rcStrict == VINF_SUCCESS);
6457 break;
6458 }
6459
6460 case IEMXCPTRAISE_REEXEC_INSTR:
6461 Assert(rcStrict == VINF_SUCCESS);
6462 break;
6463
6464 case IEMXCPTRAISE_DOUBLE_FAULT:
6465 {
6466 /*
6467 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6468 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6469 */
6470 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6471 {
6472 pVmxTransient->fVectoringDoublePF = true;
6473 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6474 pVCpu->cpum.GstCtx.cr2));
6475 rcStrict = VINF_SUCCESS;
6476 }
6477 else
6478 {
6479 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6480 vmxHCSetPendingXcptDF(pVCpu);
6481 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6482 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6483 rcStrict = VINF_HM_DOUBLE_FAULT;
6484 }
6485 break;
6486 }
6487
6488 case IEMXCPTRAISE_TRIPLE_FAULT:
6489 {
6490 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6491 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6492 rcStrict = VINF_EM_RESET;
6493 break;
6494 }
6495
6496 case IEMXCPTRAISE_CPU_HANG:
6497 {
6498 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6499 rcStrict = VERR_EM_GUEST_CPU_HANG;
6500 break;
6501 }
6502
6503 default:
6504 {
6505 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6506 rcStrict = VERR_VMX_IPE_2;
6507 break;
6508 }
6509 }
6510 }
6511 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6512 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6513 {
6514 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6515 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6516 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6517 {
6518 /*
6519 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6520 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6521 * that virtual NMIs remain blocked until the IRET execution is completed.
6522 *
6523 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6524 */
6525 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6526 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6527 }
6528 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6529 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6530 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6531 {
6532 /*
6533 * Execution of IRET caused an EPT violation, page-modification log-full event or
6534 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6535 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6536 * that virtual NMIs remain blocked until the IRET execution is completed.
6537 *
6538 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6539 */
6540 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6541 {
6542 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6543 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6544 }
6545 }
6546 }
6547
6548 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6549 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6550 return rcStrict;
6551}
6552
6553
6554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6555/**
6556 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6557 * guest attempting to execute a VMX instruction.
6558 *
6559 * @returns Strict VBox status code (i.e. informational status codes too).
6560 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6561 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6562 *
6563 * @param pVCpu The cross context virtual CPU structure.
6564 * @param uExitReason The VM-exit reason.
6565 *
6566 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6567 * @remarks No-long-jump zone!!!
6568 */
6569static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6570{
6571 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6572 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6573
6574 /*
6575 * The physical CPU would have already checked the CPU mode/code segment.
6576 * We shall just assert here for paranoia.
6577 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6578 */
6579 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6580 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6581 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6582
6583 if (uExitReason == VMX_EXIT_VMXON)
6584 {
6585 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6586
6587 /*
6588 * We check CR4.VMXE because it is required to be always set while in VMX operation
6589 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6590 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6591 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6592 */
6593 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6594 {
6595 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6596 vmxHCSetPendingXcptUD(pVCpu);
6597 return VINF_HM_PENDING_XCPT;
6598 }
6599 }
6600 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6601 {
6602 /*
6603 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6604 * (other than VMXON), so we need to raise a #UD.
6605 */
6606 Log4Func(("Not in VMX root mode -> #UD\n"));
6607 vmxHCSetPendingXcptUD(pVCpu);
6608 return VINF_HM_PENDING_XCPT;
6609 }
6610
6611 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6612 return VINF_SUCCESS;
6613}
6614
6615
6616/**
6617 * Decodes the memory operand of an instruction that caused a VM-exit.
6618 *
6619 * The Exit qualification field provides the displacement field for memory
6620 * operand instructions, if any.
6621 *
6622 * @returns Strict VBox status code (i.e. informational status codes too).
6623 * @retval VINF_SUCCESS if the operand was successfully decoded.
6624 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6625 * operand.
6626 * @param pVCpu The cross context virtual CPU structure.
6627 * @param uExitInstrInfo The VM-exit instruction information field.
6628 * @param GCPtrDisp The instruction displacement field, if any. For
6629 * RIP-relative addressing pass RIP + displacement here.
6630 * @param enmMemAccess The memory operand's access type (read or write).
6631 * @param pGCPtrMem Where to store the effective destination memory address.
6632 *
6633 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6634 * virtual-8086 mode and hence skips those checks while verifying if the
6635 * segment is valid.
6636 */
6637static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6638 PRTGCPTR pGCPtrMem)
6639{
6640 Assert(pGCPtrMem);
6641 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6642 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6643 | CPUMCTX_EXTRN_CR0);
6644
6645 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6646 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6647 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6648
6649 VMXEXITINSTRINFO ExitInstrInfo;
6650 ExitInstrInfo.u = uExitInstrInfo;
6651 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6652 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6653 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6654 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6655 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6656 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6657 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6658 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6659 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6660
6661 /*
6662 * Validate instruction information.
6663 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6664 */
6665 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6666 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6667 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6668 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6669 AssertLogRelMsgReturn(fIsMemOperand,
6670 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6671
6672 /*
6673 * Compute the complete effective address.
6674 *
6675 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6676 * See AMD spec. 4.5.2 "Segment Registers".
6677 */
6678 RTGCPTR GCPtrMem = GCPtrDisp;
6679 if (fBaseRegValid)
6680 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6681 if (fIdxRegValid)
6682 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6683
6684 RTGCPTR const GCPtrOff = GCPtrMem;
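    /* Apply the segment base: outside long mode every segment base applies; in long mode
       the CPU treats the CS/DS/ES/SS bases as zero, so only FS and GS contribute. */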
6685 if ( !fIsLongMode
6686 || iSegReg >= X86_SREG_FS)
6687 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6688 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
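    /* Worked example (hypothetical operand encoding): a 32-bit address-size operand with
       base=EBX, index=ESI, scale factor 4 and displacement 0x10 decodes to
       GCPtrMem = (0x10 + EBX + ESI*4 + DS.base) & 0xffffffff. */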
6689
6690 /*
6691 * Validate effective address.
6692 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6693 */
6694 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6695 Assert(cbAccess > 0);
6696 if (fIsLongMode)
6697 {
6698 if (X86_IS_CANONICAL(GCPtrMem))
6699 {
6700 *pGCPtrMem = GCPtrMem;
6701 return VINF_SUCCESS;
6702 }
6703
6704 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6705 * "Data Limit Checks in 64-bit Mode". */
6706 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6707 vmxHCSetPendingXcptGP(pVCpu, 0);
6708 return VINF_HM_PENDING_XCPT;
6709 }
6710
6711 /*
6712 * This is a watered-down version of iemMemApplySegment().
6713 * Parts that are not applicable to VMX instructions, such as real-or-v8086-mode
6714 * handling and segment CPL/DPL checks, are skipped.
6715 */
6716 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6717 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6718 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6719
6720 /* Check if the segment is present and usable. */
6721 if ( pSel->Attr.n.u1Present
6722 && !pSel->Attr.n.u1Unusable)
6723 {
6724 Assert(pSel->Attr.n.u1DescType);
6725 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6726 {
6727 /* Check permissions for the data segment. */
6728 if ( enmMemAccess == VMXMEMACCESS_WRITE
6729 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6730 {
6731 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6732 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6733 return VINF_HM_PENDING_XCPT;
6734 }
6735
6736 /* Check limits if it's a normal data segment. */
6737 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6738 {
6739 if ( GCPtrFirst32 > pSel->u32Limit
6740 || GCPtrLast32 > pSel->u32Limit)
6741 {
6742 Log4Func(("Data segment limit exceeded. "
6743 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6744 GCPtrLast32, pSel->u32Limit));
6745 if (iSegReg == X86_SREG_SS)
6746 vmxHCSetPendingXcptSS(pVCpu, 0);
6747 else
6748 vmxHCSetPendingXcptGP(pVCpu, 0);
6749 return VINF_HM_PENDING_XCPT;
6750 }
6751 }
6752 else
6753 {
6754 /* Check limits if it's an expand-down data segment.
6755 Note! The upper boundary is defined by the B bit, not the G bit! */
6756 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6757 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6758 {
6759 Log4Func(("Expand-down data segment limit exceeded. "
6760 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6761 GCPtrLast32, pSel->u32Limit));
6762 if (iSegReg == X86_SREG_SS)
6763 vmxHCSetPendingXcptSS(pVCpu, 0);
6764 else
6765 vmxHCSetPendingXcptGP(pVCpu, 0);
6766 return VINF_HM_PENDING_XCPT;
6767 }
6768 }
6769 }
6770 else
6771 {
6772 /* Check permissions for the code segment. */
6773 if ( enmMemAccess == VMXMEMACCESS_WRITE
6774 || ( enmMemAccess == VMXMEMACCESS_READ
6775 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6776 {
6777 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6778 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6779 vmxHCSetPendingXcptGP(pVCpu, 0);
6780 return VINF_HM_PENDING_XCPT;
6781 }
6782
6783 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6784 if ( GCPtrFirst32 > pSel->u32Limit
6785 || GCPtrLast32 > pSel->u32Limit)
6786 {
6787 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6788 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6789 if (iSegReg == X86_SREG_SS)
6790 vmxHCSetPendingXcptSS(pVCpu, 0);
6791 else
6792 vmxHCSetPendingXcptGP(pVCpu, 0);
6793 return VINF_HM_PENDING_XCPT;
6794 }
6795 }
6796 }
6797 else
6798 {
6799 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6800 vmxHCSetPendingXcptGP(pVCpu, 0);
6801 return VINF_HM_PENDING_XCPT;
6802 }
6803
6804 *pGCPtrMem = GCPtrMem;
6805 return VINF_SUCCESS;
6806}
6807#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6808
6809
6810/**
6811 * VM-exit helper for LMSW.
6812 */
6813static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6814{
6815 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6816 AssertRCReturn(rc, rc);
6817
6818 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6819 AssertMsg( rcStrict == VINF_SUCCESS
6820 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6821
6822 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6823 if (rcStrict == VINF_IEM_RAISED_XCPT)
6824 {
6825 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6826 rcStrict = VINF_SUCCESS;
6827 }
6828
6829 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6830 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6831 return rcStrict;
6832}
6833
6834
6835/**
6836 * VM-exit helper for CLTS.
6837 */
6838static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6839{
6840 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6841 AssertRCReturn(rc, rc);
6842
6843 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6844 AssertMsg( rcStrict == VINF_SUCCESS
6845 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6846
6847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6848 if (rcStrict == VINF_IEM_RAISED_XCPT)
6849 {
6850 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6851 rcStrict = VINF_SUCCESS;
6852 }
6853
6854 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6855 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6856 return rcStrict;
6857}
6858
6859
6860/**
6861 * VM-exit helper for MOV from CRx (CRx read).
6862 */
6863static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6864{
6865 Assert(iCrReg < 16);
6866 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6867
6868 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6869 AssertRCReturn(rc, rc);
6870
6871 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6872 AssertMsg( rcStrict == VINF_SUCCESS
6873 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6874
6875 if (iGReg == X86_GREG_xSP)
6876 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6877 else
6878 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6879#ifdef VBOX_WITH_STATISTICS
6880 switch (iCrReg)
6881 {
6882 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6883 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6884 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6885 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6886 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6887 }
6888#endif
6889 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6890 return rcStrict;
6891}
6892
6893
6894/**
6895 * VM-exit helper for MOV to CRx (CRx write).
6896 */
6897static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6898{
6899 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6900
6901 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6902 AssertMsg( rcStrict == VINF_SUCCESS
6903 || rcStrict == VINF_IEM_RAISED_XCPT
6904 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6905
6906 switch (iCrReg)
6907 {
6908 case 0:
6909 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6910 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6911 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6912 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6913 break;
6914
6915 case 2:
6916 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6917 /* Nothing to do here, CR2 is not part of the VMCS. */
6918 break;
6919
6920 case 3:
6921 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6922 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6923 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6924 break;
6925
6926 case 4:
6927 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6928 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6929#ifndef IN_NEM_DARWIN
6930 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6931 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6932#else
6933 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6934#endif
6935 break;
6936
6937 case 8:
6938 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6939 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6940 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6941 break;
6942
6943 default:
6944 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6945 break;
6946 }
6947
6948 if (rcStrict == VINF_IEM_RAISED_XCPT)
6949 {
6950 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6951 rcStrict = VINF_SUCCESS;
6952 }
6953 return rcStrict;
6954}
6955
6956
6957/**
6958 * VM-exit exception handler for \#PF (Page-fault exception).
6959 *
6960 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6961 */
6962static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6963{
6964 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6965 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6966
6967#ifndef IN_NEM_DARWIN
6968 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6969 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6970 { /* likely */ }
6971 else
6972#endif
6973 {
6974#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6975 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6976#endif
6977 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6978 if (!pVmxTransient->fVectoringDoublePF)
6979 {
6980 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6981 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6982 }
6983 else
6984 {
6985 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6986 Assert(!pVmxTransient->fIsNestedGuest);
6987 vmxHCSetPendingXcptDF(pVCpu);
6988 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6989 }
6990 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6991 return VINF_SUCCESS;
6992 }
6993
6994 Assert(!pVmxTransient->fIsNestedGuest);
6995
6996 /* If it's a vectoring #PF, emulate injection of the original event as PGMTrap0eHandler() is incapable
6997 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6998 if (pVmxTransient->fVectoringPF)
6999 {
7000 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7001 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7002 }
7003
7004 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7005 AssertRCReturn(rc, rc);
7006
7007 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
7008 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
7009
7010 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
7011 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
7012
7013 Log4Func(("#PF: rc=%Rrc\n", rc));
7014 if (rc == VINF_SUCCESS)
7015 {
7016 /*
7017 * This is typically a shadow page table sync or an MMIO instruction. But we may have
7018 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
7019 */
7020 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7021 TRPMResetTrap(pVCpu);
7022 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
7023 return rc;
7024 }
7025
7026 if (rc == VINF_EM_RAW_GUEST_TRAP)
7027 {
7028 if (!pVmxTransient->fVectoringDoublePF)
7029 {
7030 /* It's a guest page fault and needs to be reflected to the guest. */
7031 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
7032 TRPMResetTrap(pVCpu);
7033 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
7034 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
7035 uGstErrorCode, pVmxTransient->uExitQual);
7036 }
7037 else
7038 {
7039 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7040 TRPMResetTrap(pVCpu);
7041 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
7042 vmxHCSetPendingXcptDF(pVCpu);
7043 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7044 }
7045
7046 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
7047 return VINF_SUCCESS;
7048 }
7049
7050 TRPMResetTrap(pVCpu);
7051 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
7052 return rc;
7053}
7054
7055
7056/**
7057 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
7058 *
7059 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7060 */
7061static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7062{
7063 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7064 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
7065
7066 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7067 AssertRCReturn(rc, rc);
7068
7069 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
7070 {
7071 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
7072 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
7073
7074 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
7075         *        provides the VM-exit instruction length. If this causes problems later,
7076 * disassemble the instruction like it's done on AMD-V. */
7077 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7078 AssertRCReturn(rc2, rc2);
7079 return rc;
7080 }
7081
7082 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
7083 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7084 return VINF_SUCCESS;
7085}
7086
7087
7088/**
7089 * VM-exit exception handler for \#BP (Breakpoint exception).
7090 *
7091 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7092 */
7093static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7094{
7095 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7096 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
7097
7098 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7099 AssertRCReturn(rc, rc);
7100
7101 VBOXSTRICTRC rcStrict;
7102 if (!pVmxTransient->fIsNestedGuest)
7103 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
7104 else
7105 rcStrict = VINF_EM_RAW_GUEST_TRAP;
7106
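         /* Either DBGF says the breakpoint isn't ours, or we're running a nested-guest: reflect the #BP back into the guest. */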
7107 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
7108 {
7109 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7110 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7111 rcStrict = VINF_SUCCESS;
7112 }
7113
7114 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
7115 return rcStrict;
7116}
7117
7118
7119/**
7120 * VM-exit exception handler for \#AC (Alignment-check exception).
7121 *
7122 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7123 */
7124static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7125{
7126 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7127
7128 /*
7129 * Detect #ACs caused by host having enabled split-lock detection.
7130 * Emulate such instructions.
7131 */
7132#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
7133 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7134 AssertRCReturn(rc, rc);
7135 /** @todo detect split lock in cpu feature? */
7136 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
7137 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7138 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
7139 || CPUMGetGuestCPL(pVCpu) != 3
7140            /* 3. When EFLAGS.AC is 0 this can only be a split-lock case. */
7141 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
7142 {
7143 /*
7144 * Check for debug/trace events and import state accordingly.
7145 */
7146 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
7147 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7148 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
7149#ifndef IN_NEM_DARWIN
7150 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7151#endif
7152 )
7153 {
7154 if (pVM->cCpus == 1)
7155 {
7156#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7157 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7158 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7159#else
7160 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7161 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7162#endif
7163 AssertRCReturn(rc, rc);
7164 }
7165 }
7166 else
7167 {
7168 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7169 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7170 AssertRCReturn(rc, rc);
7171
7172 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7173
7174 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7175 {
7176 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7177 if (rcStrict != VINF_SUCCESS)
7178 return rcStrict;
7179 }
7180 }
7181
7182 /*
7183 * Emulate the instruction.
7184 *
7185 * We have to ignore the LOCK prefix here as we must not retrigger the
7186 * detection on the host. This isn't all that satisfactory, though...
7187 */
7188 if (pVM->cCpus == 1)
7189 {
7190 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7191 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7192
7193 /** @todo For SMP configs we should do a rendezvous here. */
7194 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7195 if (rcStrict == VINF_SUCCESS)
7196#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7197 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7198 HM_CHANGED_GUEST_RIP
7199 | HM_CHANGED_GUEST_RFLAGS
7200 | HM_CHANGED_GUEST_GPRS_MASK
7201 | HM_CHANGED_GUEST_CS
7202 | HM_CHANGED_GUEST_SS);
7203#else
7204 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7205#endif
7206 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7207 {
7208 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7209 rcStrict = VINF_SUCCESS;
7210 }
7211 return rcStrict;
7212 }
7213 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7214 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7215 return VINF_EM_EMULATE_SPLIT_LOCK;
7216 }
7217
7218 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7219 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7220 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7221
7222 /* Re-inject it. We'll detect any nesting before getting here. */
7223 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7224 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7225 return VINF_SUCCESS;
7226}
7227
7228
7229/**
7230 * VM-exit exception handler for \#DB (Debug exception).
7231 *
7232 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7233 */
7234static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7235{
7236 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7237 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7238
7239 /*
7240     * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
7241 */
7242 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7243
7244 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7245 uint64_t const uDR6 = X86_DR6_INIT_VAL
7246 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7247 | X86_DR6_BD | X86_DR6_BS));
7248 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7249
7250 int rc;
7251 if (!pVmxTransient->fIsNestedGuest)
7252 {
7253 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7254
7255 /*
7256 * Prevents stepping twice over the same instruction when the guest is stepping using
7257 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7258 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7259 */
7260 if ( rc == VINF_EM_DBG_STEPPED
7261 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7262 {
7263 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7264 rc = VINF_EM_RAW_GUEST_TRAP;
7265 }
7266 }
7267 else
7268 rc = VINF_EM_RAW_GUEST_TRAP;
7269 Log6Func(("rc=%Rrc\n", rc));
7270 if (rc == VINF_EM_RAW_GUEST_TRAP)
7271 {
7272 /*
7273 * The exception was for the guest. Update DR6, DR7.GD and
7274 * IA32_DEBUGCTL.LBR before forwarding it.
7275 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7276 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7277 */
7278#ifndef IN_NEM_DARWIN
7279 VMMRZCallRing3Disable(pVCpu);
7280 HM_DISABLE_PREEMPT(pVCpu);
7281
7282 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7283 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7284 if (CPUMIsGuestDebugStateActive(pVCpu))
7285 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7286
7287 HM_RESTORE_PREEMPT();
7288 VMMRZCallRing3Enable(pVCpu);
7289#else
7290 /** @todo */
7291#endif
7292
7293 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7294 AssertRCReturn(rc, rc);
7295
7296 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7297 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7298
7299 /* Paranoia. */
7300 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7301 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7302
7303 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7304 AssertRC(rc);
7305
7306 /*
7307 * Raise #DB in the guest.
7308 *
7309 * It is important to reflect exactly what the VM-exit gave us (preserving the
7310 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7311 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7312 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7313 *
7314         * Intel re-documented ICEBP/INT1 in May 2018; it was previously only documented as part of
7315         * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7316 */
7317 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7318 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7319 return VINF_SUCCESS;
7320 }
7321
7322 /*
7323 * Not a guest trap, must be a hypervisor related debug event then.
7324 * Update DR6 in case someone is interested in it.
7325 */
7326 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7327 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7328 CPUMSetHyperDR6(pVCpu, uDR6);
7329
7330 return rc;
7331}
7332
7333
7334/**
7335 * Hacks its way around the lovely mesa driver's backdoor accesses.
7336 *
7337 * @sa hmR0SvmHandleMesaDrvGp.
7338 */
7339static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7340{
7341 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7342 RT_NOREF(pCtx);
7343
7344 /* For now we'll just skip the instruction. */
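         /* I.e. the backdoor IN is not actually performed; RIP is advanced past it and the guest registers are left untouched. */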
7345 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7346}
7347
7348
7349/**
7350  * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7351 * backdoor logging w/o checking what it is running inside.
7352 *
7353 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7354 * backdoor port and magic numbers loaded in registers.
7355 *
7356 * @returns true if it is, false if it isn't.
7357 * @sa hmR0SvmIsMesaDrvGp.
7358 */
7359DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7360{
7361 /* 0xed: IN eAX,dx */
7362 uint8_t abInstr[1];
7363 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7364 return false;
7365
7366 /* Check that it is #GP(0). */
7367 if (pVmxTransient->uExitIntErrorCode != 0)
7368 return false;
7369
7370 /* Check magic and port. */
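         /* The mesa driver uses the VMware backdoor interface: magic 'VMXh' (0x564d5868) in EAX and I/O port 0x5658 ('VX') in DX. */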
7371 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7372 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
7373 if (pCtx->rax != UINT32_C(0x564d5868))
7374 return false;
7375 if (pCtx->dx != UINT32_C(0x5658))
7376 return false;
7377
7378 /* Flat ring-3 CS. */
7379 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7380 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7381 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7382 if (pCtx->cs.Attr.n.u2Dpl != 3)
7383 return false;
7384 if (pCtx->cs.u64Base != 0)
7385 return false;
7386
7387 /* Check opcode. */
7388 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7389 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7390 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7391 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7392 if (RT_FAILURE(rc))
7393 return false;
7394 if (abInstr[0] != 0xed)
7395 return false;
7396
7397 return true;
7398}
7399
7400
7401/**
7402 * VM-exit exception handler for \#GP (General-protection exception).
7403 *
7404 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7405 */
7406static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7407{
7408 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7409 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7410
7411 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7412 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7413#ifndef IN_NEM_DARWIN
7414 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7415 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7416 { /* likely */ }
7417 else
7418#endif
7419 {
7420#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7421# ifndef IN_NEM_DARWIN
7422 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7423# else
7424 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7425# endif
7426#endif
7427 /*
7428 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7429 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7430 */
7431 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7432 AssertRCReturn(rc, rc);
7433 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7434 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7435
7436 if ( pVmxTransient->fIsNestedGuest
7437 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7438 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7439 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7440 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7441 else
7442 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7443 return rc;
7444 }
7445
7446#ifndef IN_NEM_DARWIN
7447 Assert(CPUMIsGuestInRealModeEx(pCtx));
7448 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7449 Assert(!pVmxTransient->fIsNestedGuest);
7450
7451 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7452 AssertRCReturn(rc, rc);
7453
7454 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7455 if (rcStrict == VINF_SUCCESS)
7456 {
7457 if (!CPUMIsGuestInRealModeEx(pCtx))
7458 {
7459 /*
7460 * The guest is no longer in real-mode, check if we can continue executing the
7461 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7462 */
7463 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7464 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7465 {
7466 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7467 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7468 }
7469 else
7470 {
7471 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7472 rcStrict = VINF_EM_RESCHEDULE;
7473 }
7474 }
7475 else
7476 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7477 }
7478 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7479 {
7480 rcStrict = VINF_SUCCESS;
7481 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7482 }
7483 return VBOXSTRICTRC_VAL(rcStrict);
7484#endif
7485}
7486
7487
7488/**
7489 * VM-exit exception handler for \#DE (Divide Error).
7490 *
7491 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7492 */
7493static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7494{
7495 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7496 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7497
7498 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7499 AssertRCReturn(rc, rc);
7500
7501 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7502 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7503 {
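             /* Give GCM (the Guest Compatibility Manager) a chance to fix up the guest #DE by adjusting the register context;
                on success we simply restart the instruction. */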
7504 uint8_t cbInstr = 0;
7505 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7506 if (rc2 == VINF_SUCCESS)
7507 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7508 else if (rc2 == VERR_NOT_FOUND)
7509 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7510 else
7511 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7512 }
7513 else
7514 rcStrict = VINF_SUCCESS; /* Do nothing. */
7515
7516 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7517 if (RT_FAILURE(rcStrict))
7518 {
7519 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7520 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7521 rcStrict = VINF_SUCCESS;
7522 }
7523
7524 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7525 return VBOXSTRICTRC_VAL(rcStrict);
7526}
7527
7528
7529/**
7530 * VM-exit exception handler wrapper for all other exceptions that are not handled
7531 * by a specific handler.
7532 *
7533 * This simply re-injects the exception back into the VM without any special
7534 * processing.
7535 *
7536 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7537 */
7538static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7539{
7540 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7541
7542#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7543# ifndef IN_NEM_DARWIN
7544 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7545 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7546 ("uVector=%#x u32XcptBitmap=%#X32\n",
7547 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7548 NOREF(pVmcsInfo);
7549# endif
7550#endif
7551
7552 /*
7553 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7554 * would have been handled while checking exits due to event delivery.
7555 */
7556 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7557
7558#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7559 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7560 AssertRCReturn(rc, rc);
7561 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7562#endif
7563
7564#ifdef VBOX_WITH_STATISTICS
7565 switch (uVector)
7566 {
7567 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7568 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7569 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7570 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7571 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7572 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7573         case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNM); break;
7574 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7575 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7576 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7577 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7578 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7579 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7580 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7581 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7582 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7583 default:
7584 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7585 break;
7586 }
7587#endif
7588
7589     /* We should never call this function for a page-fault; we'd need to pass on the fault address below otherwise. */
7590 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7591 NOREF(uVector);
7592
7593 /* Re-inject the original exception into the guest. */
7594 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7595 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7596 return VINF_SUCCESS;
7597}
7598
7599
7600/**
7601 * VM-exit exception handler for all exceptions (except NMIs!).
7602 *
7603 * @remarks This may be called for both guests and nested-guests. Take care to not
7604 * make assumptions and avoid doing anything that is not relevant when
7605 * executing a nested-guest (e.g., Mesa driver hacks).
7606 */
7607static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7608{
7609 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7610
7611 /*
7612 * If this VM-exit occurred while delivering an event through the guest IDT, take
7613 * action based on the return code and additional hints (e.g. for page-faults)
7614 * that will be updated in the VMX transient structure.
7615 */
7616 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7617 if (rcStrict == VINF_SUCCESS)
7618 {
7619 /*
7620 * If an exception caused a VM-exit due to delivery of an event, the original
7621 * event may have to be re-injected into the guest. We shall reinject it and
7622 * continue guest execution. However, page-fault is a complicated case and
7623 * needs additional processing done in vmxHCExitXcptPF().
7624 */
7625 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7626 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7627 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7628 || uVector == X86_XCPT_PF)
7629 {
7630 switch (uVector)
7631 {
7632 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7633 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7634 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7635 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7636 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7637 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7638 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7639 default:
7640 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7641 }
7642 }
7643 /* else: inject pending event before resuming guest execution. */
7644 }
7645 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7646 {
7647 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7648 rcStrict = VINF_SUCCESS;
7649 }
7650
7651 return rcStrict;
7652}
7653/** @} */
7654
7655
7656/** @name VM-exit handlers.
7657 * @{
7658 */
7659/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7660/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7661/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7662
7663/**
7664 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7665 */
7666HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7667{
7668 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7669 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7670
7671#ifndef IN_NEM_DARWIN
7672 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7673 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7674 return VINF_SUCCESS;
7675 return VINF_EM_RAW_INTERRUPT;
7676#else
7677 return VINF_SUCCESS;
7678#endif
7679}
7680
7681
7682/**
7683 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7684 * VM-exit.
7685 */
7686HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7687{
7688 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7689 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7690
7691 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7692
7693 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7694 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7695 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7696
7697 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7698 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7699 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7700 NOREF(pVmcsInfo);
7701
7702 VBOXSTRICTRC rcStrict;
7703 switch (uExitIntType)
7704 {
7705#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7706 /*
7707 * Host physical NMIs:
7708 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7709 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7710 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7711 *
7712 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7713 * See Intel spec. 27.5.5 "Updating Non-Register State".
7714 */
7715 case VMX_EXIT_INT_INFO_TYPE_NMI:
7716 {
7717 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7718 break;
7719 }
7720#endif
7721
7722 /*
7723 * Privileged software exceptions (#DB from ICEBP),
7724 * Software exceptions (#BP and #OF),
7725 * Hardware exceptions:
7726 * Process the required exceptions and resume guest execution if possible.
7727 */
7728 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7729 Assert(uVector == X86_XCPT_DB);
7730 RT_FALL_THRU();
7731 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7732 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7733 RT_FALL_THRU();
7734 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7735 {
7736 NOREF(uVector);
7737 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7738 | HMVMX_READ_EXIT_INSTR_LEN
7739 | HMVMX_READ_IDT_VECTORING_INFO
7740 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7741 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7742 break;
7743 }
7744
7745 default:
7746 {
7747 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7748 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7749 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7750 break;
7751 }
7752 }
7753
7754 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7755 return rcStrict;
7756}
7757
7758
7759/**
7760 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7761 */
7762HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7763{
7764 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7765
7766 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
7767 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7768 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7769
7770 /* Evaluate and deliver pending events and resume guest execution. */
7771 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7772 return VINF_SUCCESS;
7773}
7774
7775
7776/**
7777 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7778 */
7779HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7780{
7781 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7782
7783 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7784 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7785 {
7786 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7787 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7788 }
7789
7790 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7791
7792 /*
7793 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7794 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7795 */
7796 uint32_t fIntrState;
7797 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7798 AssertRC(rc);
7799 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7800 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7801 {
7802 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7803
7804 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7805 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7806 AssertRC(rc);
7807 }
7808
7809 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready. */
7810 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7811
7812 /* Evaluate and deliver pending events and resume guest execution. */
7813 return VINF_SUCCESS;
7814}
7815
7816
7817/**
7818 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7819 */
7820HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7821{
7822 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7823 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7824}
7825
7826
7827/**
7828 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7829 */
7830HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7831{
7832 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7833 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7834}
7835
7836
7837/**
7838 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7839 */
7840HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7841{
7842 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7843
7844 /*
7845 * Get the state we need and update the exit history entry.
7846 */
7847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7848 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7849 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7850 AssertRCReturn(rc, rc);
7851
7852 VBOXSTRICTRC rcStrict;
7853 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7854 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7855 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
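         /* pExitRec is NULL unless EM considers this RIP a frequent exit worth probing/optimizing via the exit-history mechanism. */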
7856 if (!pExitRec)
7857 {
7858 /*
7859 * Regular CPUID instruction execution.
7860 */
7861 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7862 if (rcStrict == VINF_SUCCESS)
7863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7864 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7865 {
7866 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7867 rcStrict = VINF_SUCCESS;
7868 }
7869 }
7870 else
7871 {
7872 /*
7873 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7874 */
7875 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7876 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7877 AssertRCReturn(rc2, rc2);
7878
7879 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7880 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7881
7882 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7883 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7884
7885 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7886 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7887 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7888 }
7889 return rcStrict;
7890}
7891
7892
7893/**
7894 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7895 */
7896HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7897{
7898 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7899
7900 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7901 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7902 AssertRCReturn(rc, rc);
7903
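         /* GETSEC only causes a VM-exit while CR4.SMXE is set (with SMXE clear it raises #UD in the guest), so defer to ring-3 for emulation. */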
7904 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7905 return VINF_EM_RAW_EMULATE_INSTR;
7906
7907 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7908 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7909}
7910
7911
7912/**
7913 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7914 */
7915HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7916{
7917 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7918
7919 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7920 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7921 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7922 AssertRCReturn(rc, rc);
7923
7924 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7925 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7926 {
7927 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7928 we must reset offsetting on VM-entry. See @bugref{6634}. */
7929 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7930 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7931 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7932 }
7933 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7934 {
7935 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7936 rcStrict = VINF_SUCCESS;
7937 }
7938 return rcStrict;
7939}
7940
7941
7942/**
7943 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7944 */
7945HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7946{
7947 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7948
7949 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7950 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7951 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7952 AssertRCReturn(rc, rc);
7953
7954 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7955 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7956 {
7957 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7958 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7959 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7960 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7961 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7962 }
7963 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7964 {
7965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7966 rcStrict = VINF_SUCCESS;
7967 }
7968 return rcStrict;
7969}
7970
7971
7972/**
7973 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7974 */
7975HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7976{
7977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7978
7979 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7980 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7981 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7982 AssertRCReturn(rc, rc);
7983
7984 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7985 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7986 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7987 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7988 {
7989 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7990 rcStrict = VINF_SUCCESS;
7991 }
7992 return rcStrict;
7993}
7994
7995
7996/**
7997 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7998 */
7999HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8000{
8001 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8002
8003 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
8004 if (EMAreHypercallInstructionsEnabled(pVCpu))
8005 {
8006 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8007 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
8008 | CPUMCTX_EXTRN_RFLAGS
8009 | CPUMCTX_EXTRN_CR0
8010 | CPUMCTX_EXTRN_SS
8011 | CPUMCTX_EXTRN_CS
8012 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
8013 AssertRCReturn(rc, rc);
8014
8015 /* Perform the hypercall. */
8016 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
8017 if (rcStrict == VINF_SUCCESS)
8018 {
8019 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8020 AssertRCReturn(rc, rc);
8021 }
8022 else
8023 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
8024 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
8025 || RT_FAILURE(rcStrict));
8026
8027 /* If the hypercall changes anything other than guest's general-purpose registers,
8028 we would need to reload the guest changed bits here before VM-entry. */
8029 }
8030 else
8031 Log4Func(("Hypercalls not enabled\n"));
8032
8033 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
8034 if (RT_FAILURE(rcStrict))
8035 {
8036 vmxHCSetPendingXcptUD(pVCpu);
8037 rcStrict = VINF_SUCCESS;
8038 }
8039
8040 return rcStrict;
8041}
8042
8043
8044/**
8045 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8046 */
8047HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8048{
8049 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8050#ifndef IN_NEM_DARWIN
8051 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
8052#endif
8053
8054 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8055 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8056 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8057 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8058 AssertRCReturn(rc, rc);
8059
8060 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
8061
8062 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
8063 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8064 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8065 {
8066 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8067 rcStrict = VINF_SUCCESS;
8068 }
8069 else
8070 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
8071 VBOXSTRICTRC_VAL(rcStrict)));
8072 return rcStrict;
8073}
8074
8075
8076/**
8077 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8078 */
8079HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8080{
8081 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8082
8083 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8084 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8085 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
8086 AssertRCReturn(rc, rc);
8087
8088 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
8089 if (rcStrict == VINF_SUCCESS)
8090 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8091 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8092 {
8093 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8094 rcStrict = VINF_SUCCESS;
8095 }
8096
8097 return rcStrict;
8098}
8099
8100
8101/**
8102 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8103 */
8104HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8105{
8106 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8107
8108 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8109 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8110 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8111 AssertRCReturn(rc, rc);
8112
8113 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
8114 if (RT_SUCCESS(rcStrict))
8115 {
8116 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
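             /* Continue running the guest (overriding a possible halt status) when EM determines the MWAIT should not block this vCPU. */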
8117 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
8118 rcStrict = VINF_SUCCESS;
8119 }
8120
8121 return rcStrict;
8122}
8123
8124
8125/**
8126 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8127 * VM-exit.
8128 */
8129HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8130{
8131 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8132 return VINF_EM_RESET;
8133}
8134
8135
8136/**
8137 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8138 */
8139HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8140{
8141 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8142
8143 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8144 AssertRCReturn(rc, rc);
8145
8146 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
8147 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
8148 rc = VINF_SUCCESS;
8149 else
8150 rc = VINF_EM_HALT;
8151
8152 if (rc != VINF_SUCCESS)
8153 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8154 return rc;
8155}
8156
8157
8158#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8159/**
8160 * VM-exit handler for instructions that result in a \#UD exception delivered to
8161 * the guest.
8162 */
8163HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8164{
8165 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8166 vmxHCSetPendingXcptUD(pVCpu);
8167 return VINF_SUCCESS;
8168}
8169#endif
8170
8171
8172/**
8173 * VM-exit handler for expiry of the VMX-preemption timer.
8174 */
8175HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8176{
8177 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8178
8179 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8180 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8181    Log12(("vmxHCExitPreemptTimer:\n"));
8182
8183 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8184 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8185 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8186 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8187 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8188}
8189
8190
8191/**
8192 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8193 */
8194HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8195{
8196 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8197
8198 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8199 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8200 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8201 AssertRCReturn(rc, rc);
8202
8203 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8204 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8205 : HM_CHANGED_RAISED_XCPT_MASK);
8206
8207#ifndef IN_NEM_DARWIN
8208 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
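         /* The guest may have changed XCR0; re-check whether guest and host XCR0 differ and, if the answer changed,
            update the start-VM function so XCR0 is (or is no longer) swapped on VM-entry/exit. */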
8209 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8210 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8211 {
8212 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8213 hmR0VmxUpdateStartVmFunction(pVCpu);
8214 }
8215#endif
8216
8217 return rcStrict;
8218}
8219
8220
8221/**
8222 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8223 */
8224HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8225{
8226 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8227
8228     /** @todo Enable the new code after finding a reliable guest test-case. */
8229#if 1
8230 return VERR_EM_INTERPRETER;
8231#else
8232 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8233 | HMVMX_READ_EXIT_INSTR_INFO
8234 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8235 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8236 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8237 AssertRCReturn(rc, rc);
8238
8239 /* Paranoia. Ensure this has a memory operand. */
8240 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8241
8242 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8243 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8244 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8245 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8246
8247 RTGCPTR GCPtrDesc;
8248 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8249
8250 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8251 GCPtrDesc, uType);
8252 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8253 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8254 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8255 {
8256 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8257 rcStrict = VINF_SUCCESS;
8258 }
8259 return rcStrict;
8260#endif
8261}
8262
8263
8264/**
8265 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8266 * VM-exit.
8267 */
8268HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8269{
8270 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8271 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8272 AssertRCReturn(rc, rc);
8273
8274 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8275 if (RT_FAILURE(rc))
8276 return rc;
8277
8278 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8279 NOREF(uInvalidReason);
8280
8281#ifdef VBOX_STRICT
8282 uint32_t fIntrState;
8283 uint64_t u64Val;
8284 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8285 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8286 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8287
8288 Log4(("uInvalidReason %u\n", uInvalidReason));
8289 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8290 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8291 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8292
8293 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8294 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8295 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8296 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8297 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8298 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8299 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8300     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW                  %#RX64\n", u64Val));
8301 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8302 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8303 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8304 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8305# ifndef IN_NEM_DARWIN
8306 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8307 {
8308 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8309 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8310 }
8311
8312 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8313# endif
8314#endif
8315
8316 return VERR_VMX_INVALID_GUEST_STATE;
8317}
8318
8319/**
8320 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8321 */
8322HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8323{
8324 /*
8325 * Cumulative notes of all recognized but unexpected VM-exits.
8326 *
8327 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8328 * nested-paging is used.
8329 *
8330     *    2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8331 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
8332 * this function (and thereby stop VM execution) for handling such instructions.
8333 *
8334 *
8335 * VMX_EXIT_INIT_SIGNAL:
8336 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8337 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8338     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8339 *
8340     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8341 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8342 * See Intel spec. "23.8 Restrictions on VMX operation".
8343 *
8344 * VMX_EXIT_SIPI:
8345 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8346 * activity state is used. We don't make use of it as our guests don't have direct
8347 * access to the host local APIC.
8348 *
8349 * See Intel spec. 25.3 "Other Causes of VM-exits".
8350 *
8351 * VMX_EXIT_IO_SMI:
8352 * VMX_EXIT_SMI:
8353 * This can only happen if we support dual-monitor treatment of SMI, which can be
8354 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8355 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8356 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8357 *
8358 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8359 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8360 *
8361 * VMX_EXIT_ERR_MSR_LOAD:
8362     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8363     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8364     *    execution.
8365 *
8366 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8367 *
8368 * VMX_EXIT_ERR_MACHINE_CHECK:
8369     *    A machine-check exception indicates a fatal/unrecoverable hardware condition,
8370     *    including but not limited to system bus, ECC, parity, cache and TLB errors. The
8371     *    #MC is an abort-class exception. We thus cannot assume a
8372     *    reasonable chance of continuing any sort of execution and we bail.
8373 *
8374 * See Intel spec. 15.1 "Machine-check Architecture".
8375 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8376 *
8377 * VMX_EXIT_PML_FULL:
8378 * VMX_EXIT_VIRTUALIZED_EOI:
8379 * VMX_EXIT_APIC_WRITE:
8380 * We do not currently support any of these features and thus they are all unexpected
8381 * VM-exits.
8382 *
8383 * VMX_EXIT_GDTR_IDTR_ACCESS:
8384 * VMX_EXIT_LDTR_TR_ACCESS:
8385 * VMX_EXIT_RDRAND:
8386 * VMX_EXIT_RSM:
8387 * VMX_EXIT_VMFUNC:
8388 * VMX_EXIT_ENCLS:
8389 * VMX_EXIT_RDSEED:
8390 * VMX_EXIT_XSAVES:
8391 * VMX_EXIT_XRSTORS:
8392 * VMX_EXIT_UMWAIT:
8393 * VMX_EXIT_TPAUSE:
8394 * VMX_EXIT_LOADIWKEY:
8395 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8396     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8397 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8398 *
8399 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8400 */
8401 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8402 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8403 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8404}
8405
8406
8407/**
8408 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8409 */
8410HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8411{
8412 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8413
8414 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8415
8416 /** @todo Optimize this: We currently drag in the whole MSR state
8417 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8418 * MSRs required. That would require changes to IEM and possibly CPUM too.
8419 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
8420 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8421 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8422 int rc;
8423 switch (idMsr)
8424 {
8425 default:
8426 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8427 __FUNCTION__);
8428 AssertRCReturn(rc, rc);
8429 break;
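         /* Note: the FS/GS base lives in the segment-register state rather than under CPUMCTX_EXTRN_ALL_MSRS,
            so the full segment register is imported for those MSRs below (see also the WRMSR handler). */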
8430 case MSR_K8_FS_BASE:
8431 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8432 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8433 AssertRCReturn(rc, rc);
8434 break;
8435 case MSR_K8_GS_BASE:
8436 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8437 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8438 AssertRCReturn(rc, rc);
8439 break;
8440 }
8441
8442 Log4Func(("ecx=%#RX32\n", idMsr));
8443
8444#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8445 Assert(!pVmxTransient->fIsNestedGuest);
8446 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8447 {
8448 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8449 && idMsr != MSR_K6_EFER)
8450 {
8451 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8452 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8453 }
8454 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8455 {
8456 Assert(pVmcsInfo->pvMsrBitmap);
8457 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8458 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8459 {
8460 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8461 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8462 }
8463 }
8464 }
8465#endif
8466
8467 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8468 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8469 if (rcStrict == VINF_SUCCESS)
8470 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8471 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8472 {
8473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8474 rcStrict = VINF_SUCCESS;
8475 }
8476 else
8477 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8478 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8479
8480 return rcStrict;
8481}
8482
8483
8484/**
8485 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8486 */
8487HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8488{
8489 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8490
8491 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8492
8493 /*
8494 * The FS and GS base MSRs are not part of the above all-MSRs mask.
8495 * Although we don't need to fetch the base as it will be overwritten shortly, when
8496 * loading guest-state we would also load the entire segment register, including the
8497 * limit and attributes, and thus we need to import them here.
8498 */
8499 /** @todo Optimize this: We currently drag in the whole MSR state
8500 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to fetch only the
8501 * MSRs required. That would require changes to IEM and possibly CPUM too.
8502 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp.) */
8503 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8504 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8505 int rc;
8506 switch (idMsr)
8507 {
8508 default:
8509 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8510 __FUNCTION__);
8511 AssertRCReturn(rc, rc);
8512 break;
8513
8514 case MSR_K8_FS_BASE:
8515 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8516 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8517 AssertRCReturn(rc, rc);
8518 break;
8519 case MSR_K8_GS_BASE:
8520 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8521 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8522 AssertRCReturn(rc, rc);
8523 break;
8524 }
8525 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8526
8527 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8528 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8529
8530 if (rcStrict == VINF_SUCCESS)
8531 {
8532 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8533
8534 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8535 if ( idMsr == MSR_IA32_APICBASE
8536 || ( idMsr >= MSR_IA32_X2APIC_START
8537 && idMsr <= MSR_IA32_X2APIC_END))
8538 {
8539 /*
8540 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8541 * When full APIC register virtualization is implemented we'll have to make
8542 * sure APIC state is saved from the VMCS before IEM changes it.
8543 */
8544 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8545 }
8546 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8547 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8548 else if (idMsr == MSR_K6_EFER)
8549 {
8550 /*
8551 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8552 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8553 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8554 */
8555 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8556 }
8557
8558 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8559 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8560 {
8561 switch (idMsr)
8562 {
8563 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8564 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8565 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8566 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8567 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8568 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8569 default:
8570 {
8571#ifndef IN_NEM_DARWIN
8572 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8573 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8574 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8575 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8576#else
8577 AssertMsgFailed(("TODO\n"));
8578#endif
8579 break;
8580 }
8581 }
8582 }
8583#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8584 else
8585 {
8586 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8587 switch (idMsr)
8588 {
8589 case MSR_IA32_SYSENTER_CS:
8590 case MSR_IA32_SYSENTER_EIP:
8591 case MSR_IA32_SYSENTER_ESP:
8592 case MSR_K8_FS_BASE:
8593 case MSR_K8_GS_BASE:
8594 {
8595 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8596 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8597 }
8598
8599 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8600 default:
8601 {
8602 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8603 {
8604 /* EFER MSR writes are always intercepted. */
8605 if (idMsr != MSR_K6_EFER)
8606 {
8607 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8608 idMsr));
8609 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8610 }
8611 }
8612
8613 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8614 {
8615 Assert(pVmcsInfo->pvMsrBitmap);
8616 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8617 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8618 {
8619 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8620 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8621 }
8622 }
8623 break;
8624 }
8625 }
8626 }
8627#endif /* VBOX_STRICT */
8628 }
8629 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8630 {
8631 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8632 rcStrict = VINF_SUCCESS;
8633 }
8634 else
8635 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8636 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8637
8638 return rcStrict;
8639}
8640
8641
8642/**
8643 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8644 */
8645HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8646{
8647 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8648
8649 /** @todo The guest has likely hit a contended spinlock. We might want to
8650 * poke or schedule a different guest VCPU. */
8651 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8652 if (RT_SUCCESS(rc))
8653 return VINF_EM_RAW_INTERRUPT;
8654
8655 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8656 return rc;
8657}
8658
8659
8660/**
8661 * VM-exit handler for when the TPR value is lowered below the specified
8662 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8663 */
8664HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8665{
8666 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8667 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8668
8669 /*
8670 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8671 * We'll re-evaluate pending interrupts and inject them before the next VM
8672 * entry so we can just continue execution here.
8673 */
8674 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8675 return VINF_SUCCESS;
8676}
8677
8678
8679/**
8680 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8681 * VM-exit.
8682 *
8683 * @retval VINF_SUCCESS when guest execution can continue.
8684 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8685 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8686 * incompatible guest state for VMX execution (real-on-v86 case).
8687 */
8688HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8689{
8690 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8691 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8692
8693 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8694 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8695 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8696
8697 VBOXSTRICTRC rcStrict;
8698 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8699 uint64_t const uExitQual = pVmxTransient->uExitQual;
8700 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8701 switch (uAccessType)
8702 {
8703 /*
8704 * MOV to CRx.
8705 */
8706 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8707 {
8708 /*
8709 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8710 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8711 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8712 * PAE PDPTEs as well.
8713 */
8714 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8715 AssertRCReturn(rc, rc);
8716
8717 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8718#ifndef IN_NEM_DARWIN
8719 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8720#endif
8721 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8722 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8723
8724 /*
8725 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8726 * - When nested paging isn't used.
8727 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8728 * - We are executing in the VM debug loop.
8729 */
8730#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8731# ifndef IN_NEM_DARWIN
8732 Assert( iCrReg != 3
8733 || !VM_IS_VMX_NESTED_PAGING(pVM)
8734 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8735 || pVCpu->hmr0.s.fUsingDebugLoop);
8736# else
8737 Assert( iCrReg != 3
8738 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8739# endif
8740#endif
8741
8742 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8743 Assert( iCrReg != 8
8744 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8745
8746 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8747 AssertMsg( rcStrict == VINF_SUCCESS
8748 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8749
8750#ifndef IN_NEM_DARWIN
8751 /*
8752 * This is a kludge for handling switches back to real mode when we try to use
8753 * V86 mode to run real-mode code directly. The problem is that V86 mode cannot
8754 * deal with special selector values, so we have to return to ring-3 and run
8755 * there until the selector values are V86 mode compatible.
8756 *
8757 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8758 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8759 * this function.
8760 */
8761 if ( iCrReg == 0
8762 && rcStrict == VINF_SUCCESS
8763 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8764 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8765 && (uOldCr0 & X86_CR0_PE)
8766 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8767 {
8768 /** @todo Check selectors rather than returning all the time. */
8769 Assert(!pVmxTransient->fIsNestedGuest);
8770 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8771 rcStrict = VINF_EM_RESCHEDULE_REM;
8772 }
8773#endif
8774
8775 break;
8776 }
8777
8778 /*
8779 * MOV from CRx.
8780 */
8781 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8782 {
8783 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8784 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8785
8786 /*
8787 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8788 * - When nested paging isn't used.
8789 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8790 * - We are executing in the VM debug loop.
8791 */
8792#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8793# ifndef IN_NEM_DARWIN
8794 Assert( iCrReg != 3
8795 || !VM_IS_VMX_NESTED_PAGING(pVM)
8796 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8797 || pVCpu->hmr0.s.fLeaveDone);
8798# else
8799 Assert( iCrReg != 3
8800 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8801# endif
8802#endif
8803
8804 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8805 Assert( iCrReg != 8
8806 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8807
8808 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8809 break;
8810 }
8811
8812 /*
8813 * CLTS (Clear Task-Switch Flag in CR0).
8814 */
8815 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8816 {
8817 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8818 break;
8819 }
8820
8821 /*
8822 * LMSW (Load Machine-Status Word into CR0).
8823 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8824 */
8825 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8826 {
8827 RTGCPTR GCPtrEffDst;
8828 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8829 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8830 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8831 if (fMemOperand)
8832 {
8833 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8834 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8835 }
8836 else
8837 GCPtrEffDst = NIL_RTGCPTR;
8838 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8839 break;
8840 }
8841
8842 default:
8843 {
8844 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8845 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8846 }
8847 }
8848
8849 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8850 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8851 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8852
8853 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8854 NOREF(pVM);
8855 return rcStrict;
8856}
8857
8858
8859/**
8860 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8861 * VM-exit.
8862 */
8863HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8864{
8865 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8866 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8867
8868 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8869 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8870 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8871 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8872#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8873 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8874 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8875 AssertRCReturn(rc, rc);
8876
8877 /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8878 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8879 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8880 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8881 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8882 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8883 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8884 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
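    /* The exit qualification encodes the access size as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes);
       the value 2 is not defined, hence the assertion above and the zero entries in the size
       tables below. */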
8885
8886 /*
8887 * Update exit history to see if this exit can be optimized.
8888 */
8889 VBOXSTRICTRC rcStrict;
8890 PCEMEXITREC pExitRec = NULL;
8891 if ( !fGstStepping
8892 && !fDbgStepping)
8893 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8894 !fIOString
8895 ? !fIOWrite
8896 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8897 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8898 : !fIOWrite
8899 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8900 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8901 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8902 if (!pExitRec)
8903 {
8904 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8905 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
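        /* Worked example: 'in al, dx' gives uIOSize=0, so cbValue=1 and uAndVal=0xff; only AL is
           updated while the upper bits of EAX are preserved when the result is merged below. */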
8906
8907 uint32_t const cbValue = s_aIOSizes[uIOSize];
8908 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8909 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8910 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8911 if (fIOString)
8912 {
8913 /*
8914 * INS/OUTS - I/O String instruction.
8915 *
8916 * Use instruction-information if available, otherwise fall back on
8917 * interpreting the instruction.
8918 */
8919 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8920 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
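            /* The VMX basic capability MSR (IA32_VMX_BASIC) advertises whether the CPU provides
               instruction information (address size, segment register) for INS/OUTS VM-exits;
               without it we fall back to full instruction interpretation via IEMExecOne. */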
8921 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8922 if (fInsOutsInfo)
8923 {
8924 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8925 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8926 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8927 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8928 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8929 if (fIOWrite)
8930 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8931 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8932 else
8933 {
8934 /*
8935 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8936 * Hence the "iSegReg" field is undefined in the VT-x instruction-information field for INS.
8937 * See Intel Instruction spec. for "INS".
8938 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8939 */
8940 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8941 }
8942 }
8943 else
8944 rcStrict = IEMExecOne(pVCpu);
8945
8946 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8947 fUpdateRipAlready = true;
8948 }
8949 else
8950 {
8951 /*
8952 * IN/OUT - I/O instruction.
8953 */
8954 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8955 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8956 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8957 if (fIOWrite)
8958 {
8959 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8960 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8961#ifndef IN_NEM_DARWIN
8962 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8963 && !pCtx->eflags.Bits.u1TF)
8964 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8965#endif
8966 }
8967 else
8968 {
8969 uint32_t u32Result = 0;
8970 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8971 if (IOM_SUCCESS(rcStrict))
8972 {
8973 /* Save result of I/O IN instr. in AL/AX/EAX. */
8974 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8975 }
8976#ifndef IN_NEM_DARWIN
8977 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8978 && !pCtx->eflags.Bits.u1TF)
8979 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8980#endif
8981 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8982 }
8983 }
8984
8985 if (IOM_SUCCESS(rcStrict))
8986 {
8987 if (!fUpdateRipAlready)
8988 {
8989 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8990 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8991 }
8992
8993 /*
8994 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
8995 * guru meditation while booting a Fedora 17 64-bit guest.
8996 *
8997 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8998 */
8999 if (fIOString)
9000 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
9001
9002 /*
9003 * If any I/O breakpoints are armed, we need to check if one triggered
9004 * and take appropriate action.
9005 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9006 */
9007#if 1
9008 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
9009#else
9010 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
9011 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
9012 AssertRCReturn(rc, rc);
9013#endif
9014
9015 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9016 * execution engines about whether hyper BPs and such are pending. */
9017 uint32_t const uDr7 = pCtx->dr[7];
9018 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9019 && X86_DR7_ANY_RW_IO(uDr7)
9020 && (pCtx->cr4 & X86_CR4_DE))
9021 || DBGFBpIsHwIoArmed(pVM)))
9022 {
9023 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
9024
9025#ifndef IN_NEM_DARWIN
9026 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
9027 VMMRZCallRing3Disable(pVCpu);
9028 HM_DISABLE_PREEMPT(pVCpu);
9029
9030 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
9031
9032 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
9033 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9034 {
9035 /* Raise #DB. */
9036 if (fIsGuestDbgActive)
9037 ASMSetDR6(pCtx->dr[6]);
9038 if (pCtx->dr[7] != uDr7)
9039 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
9040
9041 vmxHCSetPendingXcptDB(pVCpu);
9042 }
9043 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
9044 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
9045 else if ( rcStrict2 != VINF_SUCCESS
9046 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9047 rcStrict = rcStrict2;
9048 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
9049
9050 HM_RESTORE_PREEMPT();
9051 VMMRZCallRing3Enable(pVCpu);
9052#else
9053 /** @todo */
9054#endif
9055 }
9056 }
9057
9058#ifdef VBOX_STRICT
9059 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
9060 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
9061 Assert(!fIOWrite);
9062 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
9063 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
9064 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
9065 Assert(fIOWrite);
9066 else
9067 {
9068# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9069 * statuses that the VMM device and some others may return. See
9070 * IOM_SUCCESS() for guidance. */
9071 AssertMsg( RT_FAILURE(rcStrict)
9072 || rcStrict == VINF_SUCCESS
9073 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9074 || rcStrict == VINF_EM_DBG_BREAKPOINT
9075 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9076 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9077# endif
9078 }
9079#endif
9080 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
9081 }
9082 else
9083 {
9084 /*
9085 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
9086 */
9087 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
9088 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
9089 AssertRCReturn(rc2, rc2);
9090 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
9091 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
9092 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
9093 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9094 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
9095 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
9096
9097 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9098 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9099
9100 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9101 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9102 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9103 }
9104 return rcStrict;
9105}
9106
9107
9108/**
9109 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9110 * VM-exit.
9111 */
9112HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9113{
9114 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9115
9116 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9117 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9118 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
9119 {
9120 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
9121 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9122 {
9123 uint32_t uErrCode;
9124 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
9125 {
9126 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9127 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
9128 }
9129 else
9130 uErrCode = 0;
9131
9132 RTGCUINTPTR GCPtrFaultAddress;
9133 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
9134 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
9135 else
9136 GCPtrFaultAddress = 0;
9137
9138 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9139
9140 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
9141 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
9142
9143 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
9144 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
9145 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9146 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9147 }
9148 }
9149
9150 /* Fall back to the interpreter to emulate the task-switch. */
9151 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9152 return VERR_EM_INTERPRETER;
9153}
9154
9155
9156/**
9157 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9158 */
9159HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9160{
9161 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9162
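    /* The monitor trap flag causes a VM-exit after each guest instruction and is armed when
       single-stepping the guest; disarm it and report the completed step to the debugger. */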
9163 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9164 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9165 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9166 AssertRC(rc);
9167 return VINF_EM_DBG_STEPPED;
9168}
9169
9170
9171/**
9172 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9173 */
9174HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9175{
9176 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9177 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9178
9179 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9180 | HMVMX_READ_EXIT_INSTR_LEN
9181 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9182 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9183 | HMVMX_READ_IDT_VECTORING_INFO
9184 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9185
9186 /*
9187 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9188 */
9189 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9190 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9191 {
9192 /* For some crazy guests, event delivery can cause an APIC-access VM-exit; go to instruction emulation in that case. */
9193 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9194 {
9195 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9196 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9197 }
9198 }
9199 else
9200 {
9201 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9202 return rcStrict;
9203 }
9204
9205 /* IOMR0MmioPhysHandler() below may call into IEM, so save the necessary state. */
9206 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9207 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9208 AssertRCReturn(rc, rc);
9209
9210 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9211 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9212 switch (uAccessType)
9213 {
9214#ifndef IN_NEM_DARWIN
9215 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9216 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9217 {
9218 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9219 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9220 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9221
9222 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9223 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9224 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9225 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9226 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9227
9228 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9229 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9230 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9231 if ( rcStrict == VINF_SUCCESS
9232 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9233 || rcStrict == VERR_PAGE_NOT_PRESENT)
9234 {
9235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9236 | HM_CHANGED_GUEST_APIC_TPR);
9237 rcStrict = VINF_SUCCESS;
9238 }
9239 break;
9240 }
9241#else
9242 /** @todo */
9243#endif
9244
9245 default:
9246 {
9247 Log4Func(("uAccessType=%#x\n", uAccessType));
9248 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9249 break;
9250 }
9251 }
9252
9253 if (rcStrict != VINF_SUCCESS)
9254 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9255 return rcStrict;
9256}
9257
9258
9259/**
9260 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9261 * VM-exit.
9262 */
9263HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9264{
9265 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9266 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9267
9268 /*
9269 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9270 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9271 * must emulate the MOV DRx access.
9272 */
9273 if (!pVmxTransient->fIsNestedGuest)
9274 {
9275 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9276 if ( pVmxTransient->fWasGuestDebugStateActive
9277#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9278 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9279#endif
9280 )
9281 {
9282 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9283 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9284 }
9285
9286 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9287 && !pVmxTransient->fWasHyperDebugStateActive)
9288 {
9289 Assert(!DBGFIsStepping(pVCpu));
9290 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9291
9292 /* Whether we disable intercepting MOV DRx instructions and resume
9293 the current one, or emulate it and keep intercepting them is
9294 configurable, though it usually comes down to whether there are
9295 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9296#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9297 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9298#else
9299 bool const fResumeInstruction = true;
9300#endif
9301 if (fResumeInstruction)
9302 {
9303 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9304 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9305 AssertRC(rc);
9306 }
9307
9308#ifndef IN_NEM_DARWIN
9309 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9310 VMMRZCallRing3Disable(pVCpu);
9311 HM_DISABLE_PREEMPT(pVCpu);
9312
9313 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9314 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9315 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9316
9317 HM_RESTORE_PREEMPT();
9318 VMMRZCallRing3Enable(pVCpu);
9319#else
9320 CPUMR3NemActivateGuestDebugState(pVCpu);
9321 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9322 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9323#endif
9324
9325 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9326 if (fResumeInstruction)
9327 {
9328#ifdef VBOX_WITH_STATISTICS
9329 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9330 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9331 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9332 else
9333 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9334#endif
9335 return VINF_SUCCESS;
9336 }
9337 }
9338 }
9339
9340 /*
9341 * Import state. We must have DR7 loaded here as it's always consulted,
9342 * both for reading and writing. The other debug registers are never
9343 * exported as such.
9344 */
9345 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9346 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9347 | CPUMCTX_EXTRN_GPRS_MASK
9348 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9349 AssertRCReturn(rc, rc);
9350
9351 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9352 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9353 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9354 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9355
9356 VBOXSTRICTRC rcStrict;
9357 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9358 {
9359 /*
9360 * Write DRx register.
9361 */
9362 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9363 AssertMsg( rcStrict == VINF_SUCCESS
9364 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9365
9366 if (rcStrict == VINF_SUCCESS)
9367 {
9368 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9369 * kept it for now to avoid breaking something non-obvious. */
9370 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9371 | HM_CHANGED_GUEST_DR7);
9372 /* Update the DR6 register if guest debug state is active, otherwise we'll
9373 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9374 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9375 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9376 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9377 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9378 }
9379 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9380 {
9381 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9382 rcStrict = VINF_SUCCESS;
9383 }
9384
9385 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9386 }
9387 else
9388 {
9389 /*
9390 * Read DRx register into a general purpose register.
9391 */
9392 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9393 AssertMsg( rcStrict == VINF_SUCCESS
9394 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9395
9396 if (rcStrict == VINF_SUCCESS)
9397 {
9398 if (iGReg == X86_GREG_xSP)
9399 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9400 | HM_CHANGED_GUEST_RSP);
9401 else
9402 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9403 }
9404 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9405 {
9406 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9407 rcStrict = VINF_SUCCESS;
9408 }
9409
9410 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9411 }
9412
9413 return rcStrict;
9414}
9415
9416
9417/**
9418 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9419 * Conditional VM-exit.
9420 */
9421HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9422{
9423 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9424
9425#ifndef IN_NEM_DARWIN
9426 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9427
9428 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9429 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9430 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9431 | HMVMX_READ_IDT_VECTORING_INFO
9432 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9433 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9434
9435 /*
9436 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9437 */
9438 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9439 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9440 {
9441 /*
9442 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9443 * instruction emulation to inject the original event. Otherwise, injecting the original event
9444 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9445 */
9446 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9447 { /* likely */ }
9448 else
9449 {
9450 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9451# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9452 /** @todo NSTVMX: Think about how this should be handled. */
9453 if (pVmxTransient->fIsNestedGuest)
9454 return VERR_VMX_IPE_3;
9455# endif
9456 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9457 }
9458 }
9459 else
9460 {
9461 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9462 return rcStrict;
9463 }
9464
9465 /*
9466 * Get sufficient state and update the exit history entry.
9467 */
9468 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9469 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9470 AssertRCReturn(rc, rc);
9471
9472 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9473 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9474 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9475 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9476 if (!pExitRec)
9477 {
9478 /*
9479 * If we succeed, resume guest execution.
9480 * If we fail in interpreting the instruction because we couldn't get the guest physical address
9481 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9482 * in the host TLB), resume execution, which will cause a guest page fault and let the guest handle this
9483 * weird case. See @bugref{6043}.
9484 */
9485 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9486/** @todo bird: We can probably just go straight to IOM here and assume that
9487 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9488 * well. However, we need to address that aliasing workarounds that
9489 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9490 *
9491 * Might also be interesting to see if we can get this done more or
9492 * less locklessly inside IOM. Need to consider the lookup table
9493 * updating and use a bit more carefully first (or do all updates via
9494 * rendezvous) */
9495 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9496 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9497 if ( rcStrict == VINF_SUCCESS
9498 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9499 || rcStrict == VERR_PAGE_NOT_PRESENT)
9500 {
9501 /* Successfully handled MMIO operation. */
9502 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9503 | HM_CHANGED_GUEST_APIC_TPR);
9504 rcStrict = VINF_SUCCESS;
9505 }
9506 }
9507 else
9508 {
9509 /*
9510 * Frequent exit or something needing probing. Call EMHistoryExec.
9511 */
9512 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9513 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9514
9515 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9516 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9517
9518 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9519 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9520 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9521 }
9522 return rcStrict;
9523#else
9524 AssertFailed();
9525 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9526#endif
9527}
9528
9529
9530/**
9531 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9532 * VM-exit.
9533 */
9534HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9535{
9536 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9537#ifndef IN_NEM_DARWIN
9538 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9539
9540 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9541 | HMVMX_READ_EXIT_INSTR_LEN
9542 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9543 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9544 | HMVMX_READ_IDT_VECTORING_INFO
9545 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9546 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9547
9548 /*
9549 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9550 */
9551 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9552 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9553 {
9554 /*
9555 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9556 * we shall resolve the nested #PF and re-inject the original event.
9557 */
9558 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9559 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9560 }
9561 else
9562 {
9563 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9564 return rcStrict;
9565 }
9566
9567 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9568 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9569 AssertRCReturn(rc, rc);
9570
9571 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9572 uint64_t const uExitQual = pVmxTransient->uExitQual;
9573 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9574
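    /* Construct a #PF-style error code from the EPT-violation exit qualification: an instruction
       fetch sets ID, a write access sets RW, and any of the EPT read/write/execute permission bits
       being set means the translation was present (P). */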
9575 RTGCUINT uErrorCode = 0;
9576 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9577 uErrorCode |= X86_TRAP_PF_ID;
9578 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9579 uErrorCode |= X86_TRAP_PF_RW;
9580 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9581 uErrorCode |= X86_TRAP_PF_P;
9582
9583 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9584 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9585
9586 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9587
9588 /*
9589 * Handle the pagefault trap for the nested shadow table.
9590 */
9591 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9592 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9593 TRPMResetTrap(pVCpu);
9594
9595 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9596 if ( rcStrict == VINF_SUCCESS
9597 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9598 || rcStrict == VERR_PAGE_NOT_PRESENT)
9599 {
9600 /* Successfully synced our nested page tables. */
9601 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9602 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9603 return VINF_SUCCESS;
9604 }
9605 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9606 return rcStrict;
9607
9608#else /* IN_NEM_DARWIN */
9609 PVM pVM = pVCpu->CTX_SUFF(pVM);
9610 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9611 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9612 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9613 vmxHCImportGuestRip(pVCpu);
9614 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9615
9616 /*
9617 * Ask PGM for information about the given GCPhys. We need to check if we're
9618 * out of sync first.
9619 */
9620 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9621 false,
9622 false };
9623 PGMPHYSNEMPAGEINFO Info;
9624 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9625 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9626 if (RT_SUCCESS(rc))
9627 {
9628 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9629 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9630 {
9631 if (State.fCanResume)
9632 {
9633 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9634 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9635 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9636 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9637 State.fDidSomething ? "" : " no-change"));
9638 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9639 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9640 return VINF_SUCCESS;
9641 }
9642 }
9643
9644 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9645 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9646 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9647 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9648 State.fDidSomething ? "" : " no-change"));
9649 }
9650 else
9651 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9652 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9653 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9654
9655 /*
9656 * Emulate the memory access, either access handler or special memory.
9657 */
9658 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9659 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9660 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9661 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9662 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9663
9664 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9665 AssertRCReturn(rc, rc);
9666
9667 VBOXSTRICTRC rcStrict;
9668 if (!pExitRec)
9669 rcStrict = IEMExecOne(pVCpu);
9670 else
9671 {
9672 /* Frequent access or probing. */
9673 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9674 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9675 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9676 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9677 }
9678
9679 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9680
9681 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9682 return rcStrict;
9683#endif /* IN_NEM_DARWIN */
9684}
9685
9686#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9687
9688/**
9689 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9690 */
9691HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9692{
9693 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9694
9695 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9696 | HMVMX_READ_EXIT_INSTR_INFO
9697 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9698 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9699 | CPUMCTX_EXTRN_SREG_MASK
9700 | CPUMCTX_EXTRN_HWVIRT
9701 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9702 AssertRCReturn(rc, rc);
9703
9704 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9705
9706 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9707 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9708
9709 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9710 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9711 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9712 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9713 {
9714 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9715 rcStrict = VINF_SUCCESS;
9716 }
9717 return rcStrict;
9718}
9719
9720
9721/**
9722 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9723 */
9724HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9725{
9726 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9727
9728 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9729 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9730 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9731 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9732 AssertRCReturn(rc, rc);
9733
9734 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9735
9736 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9737 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9738 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9739 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9740 {
9741 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9742 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9743 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9744 }
9745 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9746 return rcStrict;
9747}
9748
9749
9750/**
9751 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9752 */
9753HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9754{
9755 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9756
9757 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9758 | HMVMX_READ_EXIT_INSTR_INFO
9759 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9760 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9761 | CPUMCTX_EXTRN_SREG_MASK
9762 | CPUMCTX_EXTRN_HWVIRT
9763 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9764 AssertRCReturn(rc, rc);
9765
9766 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9767
9768 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9769 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9770
9771 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9772 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9773 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9774 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9775 {
9776 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9777 rcStrict = VINF_SUCCESS;
9778 }
9779 return rcStrict;
9780}
9781
9782
9783/**
9784 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9785 */
9786HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9787{
9788 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9789
9790 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9791 | HMVMX_READ_EXIT_INSTR_INFO
9792 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9793 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9794 | CPUMCTX_EXTRN_SREG_MASK
9795 | CPUMCTX_EXTRN_HWVIRT
9796 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9797 AssertRCReturn(rc, rc);
9798
9799 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9800
9801 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9802 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9803
9804 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9805 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9806 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9807 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9808 {
9809 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9810 rcStrict = VINF_SUCCESS;
9811 }
9812 return rcStrict;
9813}
9814
9815
9816/**
9817 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9818 */
9819HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9820{
9821 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9822
9823 /*
9824 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9825 * thus might not need to import the shadow VMCS state, but it's safer to do so in
9826 * case code elsewhere dares look at unsynced VMCS fields.
9827 */
9828 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9829 | HMVMX_READ_EXIT_INSTR_INFO
9830 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9831 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9832 | CPUMCTX_EXTRN_SREG_MASK
9833 | CPUMCTX_EXTRN_HWVIRT
9834 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9835 AssertRCReturn(rc, rc);
9836
9837 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9838
9839 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9840 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9841 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9842
9843 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9844 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9845 {
9846 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9847
9848# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9849 /* Try for exit optimization. This is on the following instruction
9850 because it would be a waste of time to have to reinterpret the
9851 already decoded vmread instruction. */
9852 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9853 if (pExitRec)
9854 {
9855 /* Frequent access or probing. */
9856 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9857 AssertRCReturn(rc, rc);
9858
9859 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9860 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9861 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9862 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9863 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9864 }
9865# endif
9866 }
9867 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9868 {
9869 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9870 rcStrict = VINF_SUCCESS;
9871 }
9872 return rcStrict;
9873}
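
/*
 * Note on HMVMX_DECODE_MEM_OPERAND as used by the VMX instruction exit handlers above and
 * below: it composes the guest-linear address of the memory operand from fields of the
 * VM-exit instruction information and the displacement delivered in the exit qualification.
 * Purely as an illustration -- field extraction, segment limit/canonical checks and the
 * like are omitted, and the names below are made up for this sketch rather than being the
 * real macro's parameters -- the address arithmetic roughly boils down to:
 *
 *     static uint64_t vmxSketchDecodeMemOperand(uint64_t uSegBase, uint64_t uBaseReg,
 *                                               uint64_t uIndexReg, uint8_t cShiftScale,
 *                                               int64_t offDisp, uint8_t cbAddrWidth)
 *     {
 *         uint64_t GCPtrEff = uBaseReg + (uIndexReg << cShiftScale) + (uint64_t)offDisp;
 *         if (cbAddrWidth == 2)                    // 16-bit address size.
 *             GCPtrEff &= UINT16_MAX;
 *         else if (cbAddrWidth == 4)               // 32-bit address size.
 *             GCPtrEff &= UINT32_MAX;
 *         return uSegBase + GCPtrEff;              // Adding the segment base yields the linear address.
 *     }
 */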
9874
9875
9876/**
9877 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9878 */
9879HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9880{
9881 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9882
9883    /* Import the entire VMCS state for now as we would be switching VMCS on a successful VMRESUME;
9884       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9885 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9886 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9887 AssertRCReturn(rc, rc);
9888
9889 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9890
9891 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9892 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9893 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9894 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9895 {
9896 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9897 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9898 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9899 }
9900 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9901 return rcStrict;
9902}
9903
9904
9905/**
9906 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9907 */
9908HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9909{
9910 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9911
9912 /*
9913 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9914 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9915 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9916 */
9917 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9918 | HMVMX_READ_EXIT_INSTR_INFO
9919 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9920 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9921 | CPUMCTX_EXTRN_SREG_MASK
9922 | CPUMCTX_EXTRN_HWVIRT
9923 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9924 AssertRCReturn(rc, rc);
9925
9926 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9927
9928 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9929 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9930 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9931
9932 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9933 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9934 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9935 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9936 {
9937 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9938 rcStrict = VINF_SUCCESS;
9939 }
9940 return rcStrict;
9941}
9942
9943
9944/**
9945 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9946 */
9947HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9948{
9949 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9950
9951 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9952 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9953 | CPUMCTX_EXTRN_HWVIRT
9954 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9955 AssertRCReturn(rc, rc);
9956
9957 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9958
9959 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9960 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9961 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9962 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9963 {
9964 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9965 rcStrict = VINF_SUCCESS;
9966 }
9967 return rcStrict;
9968}
9969
9970
9971/**
9972 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9973 */
9974HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9975{
9976 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9977
9978 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9979 | HMVMX_READ_EXIT_INSTR_INFO
9980 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9981 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9982 | CPUMCTX_EXTRN_SREG_MASK
9983 | CPUMCTX_EXTRN_HWVIRT
9984 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9985 AssertRCReturn(rc, rc);
9986
9987 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9988
9989 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9990 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9991
9992 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9993 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9994 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9995 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9996 {
9997 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9998 rcStrict = VINF_SUCCESS;
9999 }
10000 return rcStrict;
10001}
10002
10003
10004/**
10005 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
10006 */
10007HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10008{
10009 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10010
10011 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10012 | HMVMX_READ_EXIT_INSTR_INFO
10013 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10014 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10015 | CPUMCTX_EXTRN_SREG_MASK
10016 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10017 AssertRCReturn(rc, rc);
10018
10019 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10020
10021 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10022 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10023
10024 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
10025 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10026 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10027 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10028 {
10029 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10030 rcStrict = VINF_SUCCESS;
10031 }
10032 return rcStrict;
10033}
10034
10035
10036# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10037/**
10038 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
10039 */
10040HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10041{
10042 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10043
10044 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10045 | HMVMX_READ_EXIT_INSTR_INFO
10046 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10047 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
10048 | CPUMCTX_EXTRN_SREG_MASK
10049 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10050 AssertRCReturn(rc, rc);
10051
10052 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
10053
10054 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10055 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
10056
10057 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
10058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10059 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10060 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10061 {
10062 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10063 rcStrict = VINF_SUCCESS;
10064 }
10065 return rcStrict;
10066}
10067# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10068#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10069/** @} */
10070
10071
10072#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10073/** @name Nested-guest VM-exit handlers.
10074 * @{
10075 */
10076/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10077/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10078/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10079
10080/**
10081 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10082 * Conditional VM-exit.
10083 */
10084HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10085{
10086 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10087
10088 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
10089
10090 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
10091 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
10092 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
10093
10094 switch (uExitIntType)
10095 {
10096# ifndef IN_NEM_DARWIN
10097 /*
10098 * Physical NMIs:
10099         * We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
10100 */
10101 case VMX_EXIT_INT_INFO_TYPE_NMI:
10102 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
10103# endif
10104
10105 /*
10106 * Hardware exceptions,
10107 * Software exceptions,
10108 * Privileged software exceptions:
10109 * Figure out if the exception must be delivered to the guest or the nested-guest.
10110 */
10111 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10112 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10113 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10114 {
10115 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10116 | HMVMX_READ_EXIT_INSTR_LEN
10117 | HMVMX_READ_IDT_VECTORING_INFO
10118 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10119
10120 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10121 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
10122 {
10123 /* Exit qualification is required for debug and page-fault exceptions. */
10124 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10125
10126 /*
10127 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
10128 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
10129 * length. However, if delivery of a software interrupt, software exception or privileged
10130 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
10131 */
10132 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10133 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
10134 pVmxTransient->uExitIntErrorCode,
10135 pVmxTransient->uIdtVectoringInfo,
10136 pVmxTransient->uIdtVectoringErrorCode);
10137#ifdef DEBUG_ramshankar
10138 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10139 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
10140 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
10141 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
10142 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
10143 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
10144#endif
10145 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
10146 }
10147
10148            /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
10149 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10150 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10151 }
10152
10153 /*
10154 * Software interrupts:
10155 * VM-exits cannot be caused by software interrupts.
10156 *
10157 * External interrupts:
10158 * This should only happen when "acknowledge external interrupts on VM-exit"
10159 * control is set. However, we never set this when executing a guest or
10160 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10161 * the guest.
10162 */
10163 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10164 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10165 default:
10166 {
10167 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10168 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10169 }
10170 }
10171}
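
/*
 * For reference, the CPUMIsGuestVmxXcptInterceptSet check above essentially follows the
 * architectural exception-bitmap rule.  A minimal sketch of that rule only -- the real
 * helper presumably pulls the bitmap and the #PF error-code mask/match from the
 * nested-guest VMCS, and the function and parameter names below are illustrative:
 *
 *     static bool vmxSketchIsXcptIntercepted(uint32_t fXcptBitmap, uint8_t uVector,
 *                                            uint32_t fPfecMask, uint32_t fPfecMatch,
 *                                            uint32_t uErrCode)
 *     {
 *         bool const fBitSet = RT_BOOL(fXcptBitmap & RT_BIT_32(uVector));
 *         if (uVector != X86_XCPT_PF)
 *             return fBitSet;                      // Plain bit test for everything but #PF.
 *         // #PF: filter the error code through the page-fault error-code mask/match pair.
 *         bool const fMatch = (uErrCode & fPfecMask) == fPfecMatch;
 *         return fMatch ? fBitSet : !fBitSet;      // A mismatch inverts the sense of bit 14.
 *     }
 */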
10172
10173
10174/**
10175 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10176 * Unconditional VM-exit.
10177 */
10178HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10179{
10180 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10181 return IEMExecVmxVmexitTripleFault(pVCpu);
10182}
10183
10184
10185/**
10186 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10187 */
10188HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10189{
10190 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10191
10192 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10193 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10194 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10195}
10196
10197
10198/**
10199 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10200 */
10201HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10202{
10203 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10204
10205 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10206 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10207 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10208}
10209
10210
10211/**
10212 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10213 * Unconditional VM-exit.
10214 */
10215HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10216{
10217 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10218
10219 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10220 | HMVMX_READ_EXIT_INSTR_LEN
10221 | HMVMX_READ_IDT_VECTORING_INFO
10222 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10223
10224 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10225 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10226 pVmxTransient->uIdtVectoringErrorCode);
10227 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10228}
10229
10230
10231/**
10232 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10233 */
10234HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10235{
10236 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10237
10238 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10239 {
10240 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10241 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10242 }
10243 return vmxHCExitHlt(pVCpu, pVmxTransient);
10244}
10245
10246
10247/**
10248 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10249 */
10250HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10251{
10252 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10253
10254 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10255 {
10256 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10257 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10258 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10259 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10260 }
10261 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10262}
10263
10264
10265/**
10266 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10267 */
10268HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10269{
10270 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10271
10272 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10273 {
10274 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10275 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10276 }
10277 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10278}
10279
10280
10281/**
10282 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10283 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10284 */
10285HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10286{
10287 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10288
10289 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10290 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10291
10292 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10293
10294 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10295 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10296 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10297
10298 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10299 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10300 u64VmcsField &= UINT64_C(0xffffffff);
10301
10302 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10303 {
10304 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10305 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10306 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10307 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10308 }
10309
10310 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10311 return vmxHCExitVmread(pVCpu, pVmxTransient);
10312 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10313}
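
/*
 * For reference, when the nested hypervisor uses VMCS shadowing, the intercept decision made
 * by CPUMIsGuestVmxVmreadVmwriteInterceptSet above roughly amounts to probing the 4 KB VMREAD
 * or VMWRITE bitmap.  A sketch of that architectural rule only (illustrative names; the case
 * where VMCS shadowing is not enabled, in which every VMREAD/VMWRITE exits, is left out):
 *
 *     static bool vmxSketchIsVmreadVmwriteIntercepted(uint8_t const *pbBitmap, uint32_t uFieldEnc)
 *     {
 *         if (uFieldEnc & UINT32_C(0xffff8000))     // Bits 31:15 set: always intercepted.
 *             return true;
 *         uint32_t const iBit = uFieldEnc & 0x7fff; // Bits 14:0 index the bitmap (32768 bits).
 *         return RT_BOOL(pbBitmap[iBit >> 3] & RT_BIT_32(iBit & 7));
 *     }
 *
 * (The handler above truncates the field encoding to 32 bits outside long mode because only
 * the low 32 bits of the source register are defined there.)
 */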
10314
10315
10316/**
10317 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10318 */
10319HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10320{
10321 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10322
10323 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10324 {
10325 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10326 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10327 }
10328
10329 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10330}
10331
10332
10333/**
10334 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10335 * Conditional VM-exit.
10336 */
10337HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10338{
10339 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10340
10341 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10342 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10343
10344 VBOXSTRICTRC rcStrict;
10345 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10346 switch (uAccessType)
10347 {
10348 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10349 {
10350 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10351 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10352 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10353 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10354
10355 bool fIntercept;
10356 switch (iCrReg)
10357 {
10358 case 0:
10359 case 4:
10360 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10361 break;
10362
10363 case 3:
10364 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10365 break;
10366
10367 case 8:
10368 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10369 break;
10370
10371 default:
10372 fIntercept = false;
10373 break;
10374 }
10375 if (fIntercept)
10376 {
10377 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10378 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10379 }
10380 else
10381 {
10382 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10383 AssertRCReturn(rc, rc);
10384 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10385 }
10386 break;
10387 }
10388
10389 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10390 {
10391 /*
10392 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10393 * CR2 reads do not cause a VM-exit.
10394 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10395 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10396 */
10397 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10398 if ( iCrReg == 3
10399 || iCrReg == 8)
10400 {
10401 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10402 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10403 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10404 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10405 {
10406 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10407 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10408 }
10409 else
10410 {
10411 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10412 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10413 }
10414 }
10415 else
10416 {
10417 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10418 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10419 }
10420 break;
10421 }
10422
10423 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10424 {
10425 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10426 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10427 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10428 if ( (uGstHostMask & X86_CR0_TS)
10429 && (uReadShadow & X86_CR0_TS))
10430 {
10431 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10432 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10433 }
10434 else
10435 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10436 break;
10437 }
10438
10439 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10440 {
10441 RTGCPTR GCPtrEffDst;
10442 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10443 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10444 if (fMemOperand)
10445 {
10446 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10447 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10448 }
10449 else
10450 GCPtrEffDst = NIL_RTGCPTR;
10451
10452 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10453 {
10454 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10455 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10456 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10457 }
10458 else
10459 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10460 break;
10461 }
10462
10463 default:
10464 {
10465 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10466 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10467 }
10468 }
10469
10470 if (rcStrict == VINF_IEM_RAISED_XCPT)
10471 {
10472 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10473 rcStrict = VINF_SUCCESS;
10474 }
10475 return rcStrict;
10476}
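
/*
 * For reference, the MOV-to-CR0/CR4 intercept test used above (CPUMIsGuestVmxMovToCr0Cr4InterceptSet)
 * follows the architectural guest/host mask + read shadow rule: a write traps if any bit owned by
 * the nested hypervisor -- i.e. set in the guest/host mask -- would differ from the read shadow.
 * A minimal sketch of just that rule, ignoring whatever additional checks the real helper performs:
 *
 *     static bool vmxSketchIsMovToCrXIntercepted(uint64_t fGstHostMask, uint64_t fReadShadow,
 *                                                uint64_t uNewCrX)
 *     {
 *         // Host-owned bits (mask bit set) trap when the new value differs from the read shadow.
 *         return RT_BOOL((uNewCrX ^ fReadShadow) & fGstHostMask);
 *     }
 *
 * The CLTS case above is the same idea specialised to CR0.TS.
 */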
10477
10478
10479/**
10480 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10481 * Conditional VM-exit.
10482 */
10483HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10484{
10485 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10486
10487 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10488 {
10489 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10490 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10491 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10492 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10493 }
10494 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10495}
10496
10497
10498/**
10499 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10500 * Conditional VM-exit.
10501 */
10502HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10503{
10504 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10505
10506 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10507
10508 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10509 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10510 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10511
10512 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10513 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10514 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10515 {
10516 /*
10517 * IN/OUT instruction:
10518 * - Provides VM-exit instruction length.
10519 *
10520 * INS/OUTS instruction:
10521 * - Provides VM-exit instruction length.
10522 * - Provides Guest-linear address.
10523 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10524 */
10525 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10526 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10527
10528        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10529 pVmxTransient->ExitInstrInfo.u = 0;
10530 pVmxTransient->uGuestLinearAddr = 0;
10531
10532 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10533 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10534 if (fIOString)
10535 {
10536 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10537 if (fVmxInsOutsInfo)
10538 {
10539 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10540 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10541 }
10542 }
10543
10544 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10545 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10546 }
10547 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10548}
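
/*
 * For reference, when "use I/O bitmaps" is set the intercept test CPUMIsGuestVmxIoInterceptSet
 * performs above essentially probes the two 4 KB I/O bitmaps, one bit per port: bitmap A covers
 * ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, and every port touched by the access is
 * checked.  A minimal sketch (illustrative names; wrap-around accesses and the "unconditional
 * I/O exiting" fallback for when the bitmaps are not used are simplified away):
 *
 *     static bool vmxSketchIsIoIntercepted(uint8_t const *pbIoBitmapA, uint8_t const *pbIoBitmapB,
 *                                          uint16_t uPort, uint8_t cbAccess)
 *     {
 *         for (uint32_t i = 0; i < cbAccess; i++)
 *         {
 *             uint16_t const uPortCur = (uint16_t)(uPort + i);
 *             uint8_t const *pbBitmap = uPortCur < 0x8000 ? pbIoBitmapA : pbIoBitmapB;
 *             uint16_t const iBit     = uPortCur & 0x7fff;
 *             if (pbBitmap[iBit >> 3] & RT_BIT_32(iBit & 7))
 *                 return true;
 *         }
 *         return false;
 *     }
 */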
10549
10550
10551/**
10552 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10553 */
10554HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10555{
10556 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10557
10558 uint32_t fMsrpm;
10559 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10560 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10561 else
10562 fMsrpm = VMXMSRPM_EXIT_RD;
10563
10564 if (fMsrpm & VMXMSRPM_EXIT_RD)
10565 {
10566 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10567 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10568 }
10569 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10570}
10571
10572
10573/**
10574 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10575 */
10576HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10577{
10578 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10579
10580 uint32_t fMsrpm;
10581 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10582 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10583 else
10584 fMsrpm = VMXMSRPM_EXIT_WR;
10585
10586 if (fMsrpm & VMXMSRPM_EXIT_WR)
10587 {
10588 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10589 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10590 }
10591 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10592}
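
/*
 * For reference, the MSR-bitmap lookup both the RDMSR and WRMSR handlers above rely on
 * (CPUMGetVmxMsrPermission) consults a single 4 KB page split into four 1 KB bitmaps:
 * read-low (MSRs 0x00000000..0x00001fff) at offset 0x000, read-high (0xc0000000..0xc0001fff)
 * at 0x400, write-low at 0x800 and write-high at 0xc00; MSRs outside both ranges always exit.
 * A minimal sketch of that layout only (illustrative names; the real helper returns combined
 * read/write permission flags rather than a single bool):
 *
 *     static bool vmxSketchIsMsrIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr, bool fWrite)
 *     {
 *         uint32_t offBitmap, iBit;
 *         if (idMsr <= UINT32_C(0x00001fff))                 // Low MSR range.
 *         {
 *             offBitmap = fWrite ? 0x800 : 0x000;
 *             iBit      = idMsr;
 *         }
 *         else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))  // High MSR range.
 *         {
 *             offBitmap = fWrite ? 0xc00 : 0x400;
 *             iBit      = idMsr - UINT32_C(0xc0000000);
 *         }
 *         else
 *             return true;                                   // MSRs outside both ranges always exit.
 *         return RT_BOOL(pbMsrBitmap[offBitmap + (iBit >> 3)] & RT_BIT_32(iBit & 7));
 *     }
 */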
10593
10594
10595/**
10596 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10597 */
10598HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10599{
10600 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10601
10602 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10603 {
10604 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10605 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10606 }
10607 return vmxHCExitMwait(pVCpu, pVmxTransient);
10608}
10609
10610
10611/**
10612 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10613 * VM-exit.
10614 */
10615HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10616{
10617 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10618
10619 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10620 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10621 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10622 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10623}
10624
10625
10626/**
10627 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10628 */
10629HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10630{
10631 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10632
10633 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10634 {
10635 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10636 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10637 }
10638 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10639}
10640
10641
10642/**
10643 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10644 */
10645HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10646{
10647 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10648
10649 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10650 * PAUSE when executing a nested-guest? If it does not, we would not need
10651 * to check for the intercepts here. Just call VM-exit... */
10652
10653 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10654 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10655 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10656 {
10657 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10658 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10659 }
10660 return vmxHCExitPause(pVCpu, pVmxTransient);
10661}
10662
10663
10664/**
10665 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10666 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10667 */
10668HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10669{
10670 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10671
10672 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10673 {
10674 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10675 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10676 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10677 }
10678 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10679}
10680
10681
10682/**
10683 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10684 * VM-exit.
10685 */
10686HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10687{
10688 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10689
10690 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10691 | HMVMX_READ_EXIT_INSTR_LEN
10692 | HMVMX_READ_IDT_VECTORING_INFO
10693 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10694
10695 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10696
10697 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10698 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10699
10700 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10701 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10702 pVmxTransient->uIdtVectoringErrorCode);
10703 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10704}
10705
10706
10707/**
10708 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10709 * Conditional VM-exit.
10710 */
10711HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10712{
10713 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10714
10715 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10716 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10717 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10718}
10719
10720
10721/**
10722 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10723 * Conditional VM-exit.
10724 */
10725HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10726{
10727 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10728
10729 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10730 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10731 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10732}
10733
10734
10735/**
10736 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10737 */
10738HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10739{
10740 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10741
10742 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10743 {
10744 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10745 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10746 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10747 }
10748 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10749}
10750
10751
10752/**
10753 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10754 */
10755HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10756{
10757 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10758
10759 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10760 {
10761 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10762 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10763 }
10764 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10765}
10766
10767
10768/**
10769 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10770 */
10771HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10772{
10773 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10774
10775 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10776 {
10777 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10778 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10779 | HMVMX_READ_EXIT_INSTR_INFO
10780 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10781 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10782 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10783 }
10784 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10785}
10786
10787
10788/**
10789 * Nested-guest VM-exit handler for invalid-guest state
10790 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10791 */
10792HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10793{
10794 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10795
10796 /*
10797 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10798     * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10799     * Handle it like an invalid-guest-state VM-exit of the outer guest.
10800 *
10801 * When the fast path is implemented, this should be changed to cause the corresponding
10802 * nested-guest VM-exit.
10803 */
10804 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10805}
10806
10807
10808/**
10809 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10810 * and only provide the instruction length.
10811 *
10812 * Unconditional VM-exit.
10813 */
10814HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10815{
10816 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10817
10818#ifdef VBOX_STRICT
10819 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10820 switch (pVmxTransient->uExitReason)
10821 {
10822 case VMX_EXIT_ENCLS:
10823 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10824 break;
10825
10826 case VMX_EXIT_VMFUNC:
10827 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10828 break;
10829 }
10830#endif
10831
10832 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10833 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10834}
10835
10836
10837/**
10838 * Nested-guest VM-exit handler for instructions that provide instruction length as
10839 * well as more information.
10840 *
10841 * Unconditional VM-exit.
10842 */
10843HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10844{
10845 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10846
10847# ifdef VBOX_STRICT
10848 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10849 switch (pVmxTransient->uExitReason)
10850 {
10851 case VMX_EXIT_GDTR_IDTR_ACCESS:
10852 case VMX_EXIT_LDTR_TR_ACCESS:
10853 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10854 break;
10855
10856 case VMX_EXIT_RDRAND:
10857 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10858 break;
10859
10860 case VMX_EXIT_RDSEED:
10861 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10862 break;
10863
10864 case VMX_EXIT_XSAVES:
10865 case VMX_EXIT_XRSTORS:
10866 /** @todo NSTVMX: Verify XSS-bitmap. */
10867 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10868 break;
10869
10870 case VMX_EXIT_UMWAIT:
10871 case VMX_EXIT_TPAUSE:
10872 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10873 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10874 break;
10875
10876 case VMX_EXIT_LOADIWKEY:
10877 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10878 break;
10879 }
10880# endif
10881
10882 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10883 | HMVMX_READ_EXIT_INSTR_LEN
10884 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10885 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10886 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10887}
10888
10889# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10890
10891/**
10892 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10893 * Conditional VM-exit.
10894 */
10895HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10896{
10897 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10898 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10899
10900 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10901 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10902 {
10903 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10904 | HMVMX_READ_EXIT_INSTR_LEN
10905 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10906 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10907 | HMVMX_READ_IDT_VECTORING_INFO
10908 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10909 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10910 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10911 AssertRCReturn(rc, rc);
10912
10913 /*
10914         * If it's our VM-exit, we're responsible for re-injecting any event whose delivery
10915         * might have triggered this VM-exit. If we forward the problem to the inner VMM,
10916         * handling that event becomes its responsibility and we clear the recovered event.
10917 */
10918 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10919 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10920 { /*likely*/ }
10921 else
10922 {
10923 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10924 return rcStrict;
10925 }
10926 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10927
10928 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10929 uint64_t const uExitQual = pVmxTransient->uExitQual;
10930
10931 RTGCPTR GCPtrNestedFault;
10932 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10933 if (fIsLinearAddrValid)
10934 {
10935 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10936 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10937 }
10938 else
10939 GCPtrNestedFault = 0;
10940
10941 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10942 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10943 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10944 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10945 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10946
10947 PGMPTWALK Walk;
10948 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10949 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10950 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10951 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10952 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10953 if (RT_SUCCESS(rcStrict))
10954 {
10955 if (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE)
10956 {
10957 Assert(!fClearEventOnForward);
10958 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM));
10959 rcStrict = VINF_EM_RESCHEDULE_REM;
10960 }
10961 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10962 return rcStrict;
10963 }
10964
10965 if (fClearEventOnForward)
10966 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10967
10968 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10969 pVmxTransient->uIdtVectoringErrorCode);
10970 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10971 {
10972 VMXVEXITINFO const ExitInfo
10973 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10974 pVmxTransient->uExitQual,
10975 pVmxTransient->cbExitInstr,
10976 pVmxTransient->uGuestLinearAddr,
10977 pVmxTransient->uGuestPhysicalAddr);
10978 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10979 }
10980
10981 AssertMsgReturn(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG,
10982 ("uErr=%#RX32 uExitQual=%#RX64 GCPhysNestedFault=%#RGp GCPtrNestedFault=%#RGv\n",
10983 (uint32_t)uErr, uExitQual, GCPhysNestedFault, GCPtrNestedFault),
10984 rcStrict);
10985 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10986 }
10987
10988 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10989}
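
/*
 * Note: the PGM_WALKFAIL_EPT_VIOLATION / PGM_WALKFAIL_EPT_MISCONFIG split above mirrors the
 * architecture: an EPT violation is reported when the EPT structures deny the access (a
 * not-present entry or a missing read/write/execute permission), whereas an EPT
 * misconfiguration is reported when an EPT paging-structure entry itself is illegally
 * configured (e.g. reserved bits set or an invalid memory type).  Anything else coming back
 * from the nested trap handler would be unexpected here, which is what the AssertMsgReturn
 * above guards against.
 */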
10990
10991
10992/**
10993 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10994 * Conditional VM-exit.
10995 */
10996HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10997{
10998 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10999 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
11000
11001 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11002 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
11003 {
11004 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
11005 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
11006 AssertRCReturn(rc, rc);
11007
11008 PGMPTWALK Walk;
11009 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11010 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
11011 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
11012 GCPhysNestedFault, false /* fIsLinearAddrValid */,
11013 0 /* GCPtrNestedFault */, &Walk);
11014 if (RT_SUCCESS(rcStrict))
11015 {
11016 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
11017 return rcStrict;
11018 }
11019
11020 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
11021 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
11022 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
11023
11024 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
11025 pVmxTransient->uIdtVectoringErrorCode);
11026 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
11027 }
11028
11029 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
11030}
11031
11032# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
11033
11034/** @} */
11035#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11036
11037
11038/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11039 * probes.
11040 *
11041 * The following few functions and associated structure contains the bloat
11042 * necessary for providing detailed debug events and dtrace probes as well as
11043 * reliable host side single stepping. This works on the principle of
11044 * "subclassing" the normal execution loop and workers. We replace the loop
11045 * method completely and override selected helpers to add necessary adjustments
11046 * to their core operation.
11047 *
11048 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11049 * any performance for debug and analysis features.
11050 *
11051 * @{
11052 */
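
/*
 * In outline, and only as an illustration of how the pieces below fit together (this is not
 * the literal control flow of the debug run loop), each iteration is bracketed like this:
 *
 *     VMXRUNDBGSTATE DbgState;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState); // DBGF/DTrace -> wanted exits.
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);  // Push the extras into the VMCS.
 *         rcStrict = <run the guest and handle the VM-exit, consulting DbgState.bmExitsToCheck>;
 *         if (rcStrict != VINF_SUCCESS || <single-stepping moved past uRipStart/uCsStart>)
 *             break;
 *     }
 *     rcStrict = vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */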
11053
11054/**
11055 * Transient per-VCPU debug state of VMCS and related info that we save/restore in
11056 * the debug run loop.
11057 */
11058typedef struct VMXRUNDBGSTATE
11059{
11060 /** The RIP we started executing at. This is for detecting that we stepped. */
11061 uint64_t uRipStart;
11062 /** The CS we started executing with. */
11063 uint16_t uCsStart;
11064
11065 /** Whether we've actually modified the 1st execution control field. */
11066 bool fModifiedProcCtls : 1;
11067 /** Whether we've actually modified the 2nd execution control field. */
11068 bool fModifiedProcCtls2 : 1;
11069 /** Whether we've actually modified the exception bitmap. */
11070 bool fModifiedXcptBitmap : 1;
11071
11072    /** Whether we desire the CR0 mask to be cleared. */
11073 bool fClearCr0Mask : 1;
11074    /** Whether we desire the CR4 mask to be cleared. */
11075 bool fClearCr4Mask : 1;
11076 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11077 uint32_t fCpe1Extra;
11078 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11079 uint32_t fCpe1Unwanted;
11080 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11081 uint32_t fCpe2Extra;
11082 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11083 uint32_t bmXcptExtra;
11084 /** The sequence number of the Dtrace provider settings the state was
11085 * configured against. */
11086 uint32_t uDtraceSettingsSeqNo;
11087 /** VM-exits to check (one bit per VM-exit). */
11088 uint32_t bmExitsToCheck[3];
11089
11090 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11091 uint32_t fProcCtlsInitial;
11092 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11093 uint32_t fProcCtls2Initial;
11094 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11095 uint32_t bmXcptInitial;
11096} VMXRUNDBGSTATE;
11097AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11098typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
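
/*
 * The compile-time assertion above checks that bmExitsToCheck is sized for one bit per VM-exit
 * reason: (VMX_EXIT_MAX + 1 + 31) / 32 rounds the number of exit reasons up to whole uint32_t
 * words and the trailing "* 4" converts words to bytes.  With uint32_t bmExitsToCheck[3] that
 * is 96 bits, i.e. the structure can track up to 96 exit reasons.
 */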
11099
11100
11101/**
11102 * Initializes the VMXRUNDBGSTATE structure.
11103 *
11104 * @param pVCpu The cross context virtual CPU structure of the
11105 * calling EMT.
11106 * @param pVmxTransient The VMX-transient structure.
11107 * @param pDbgState The debug state to initialize.
11108 */
11109static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11110{
11111 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11112 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11113
11114 pDbgState->fModifiedProcCtls = false;
11115 pDbgState->fModifiedProcCtls2 = false;
11116 pDbgState->fModifiedXcptBitmap = false;
11117 pDbgState->fClearCr0Mask = false;
11118 pDbgState->fClearCr4Mask = false;
11119 pDbgState->fCpe1Extra = 0;
11120 pDbgState->fCpe1Unwanted = 0;
11121 pDbgState->fCpe2Extra = 0;
11122 pDbgState->bmXcptExtra = 0;
11123 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11124 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11125 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11126}
11127
11128
11129/**
11130 * Updates the VMCS fields with changes requested by @a pDbgState.
11131 *
11132 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
11133 * immediately before executing guest code, i.e. when interrupts are disabled.
11134 * We don't check status codes here as we cannot easily assert or return in the
11135 * latter case.
11136 *
11137 * @param pVCpu The cross context virtual CPU structure.
11138 * @param pVmxTransient The VMX-transient structure.
11139 * @param pDbgState The debug state.
11140 */
11141static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11142{
11143 /*
11144 * Ensure desired flags in VMCS control fields are set.
11145 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11146 *
11147 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11148 * there should be no stale data in pCtx at this point.
11149 */
11150 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11151 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11152 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11153 {
11154 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11155 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11156 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11157 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11158 pDbgState->fModifiedProcCtls = true;
11159 }
11160
11161 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11162 {
11163 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11164 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11165 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11166 pDbgState->fModifiedProcCtls2 = true;
11167 }
11168
11169 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11170 {
11171 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11172 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11173 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11174 pDbgState->fModifiedXcptBitmap = true;
11175 }
11176
11177 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11178 {
11179 pVmcsInfo->u64Cr0Mask = 0;
11180 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11181 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11182 }
11183
11184 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11185 {
11186 pVmcsInfo->u64Cr4Mask = 0;
11187 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11188 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11189 }
11190
11191 NOREF(pVCpu);
11192}
11193
11194
11195/**
11196 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11197 * re-entry next time around.
11198 *
11199 * @returns Strict VBox status code (i.e. informational status codes too).
11200 * @param pVCpu The cross context virtual CPU structure.
11201 * @param pVmxTransient The VMX-transient structure.
11202 * @param pDbgState The debug state.
11203 * @param rcStrict The return code from executing the guest using single
11204 * stepping.
11205 */
11206static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11207 VBOXSTRICTRC rcStrict)
11208{
11209 /*
11210 * Restore VM-exit control settings as we may not reenter this function the
11211 * next time around.
11212 */
11213 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11214
11215     /* We reload the initial value and trigger whatever recalculations we can the
11216        next time around. From the looks of things, that's all that's required atm. */
11217 if (pDbgState->fModifiedProcCtls)
11218 {
11219 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11220 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11221 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11222 AssertRC(rc2);
11223 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11224 }
11225
11226 /* We're currently the only ones messing with this one, so just restore the
11227 cached value and reload the field. */
11228 if ( pDbgState->fModifiedProcCtls2
11229 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11230 {
11231 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11232 AssertRC(rc2);
11233 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11234 }
11235
11236 /* If we've modified the exception bitmap, we restore it and trigger
11237 reloading and partial recalculation the next time around. */
11238 if (pDbgState->fModifiedXcptBitmap)
11239 {
11240 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11241 AssertRC(rc2);
11242 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11243 }
11244
11245 return rcStrict;
11246}
11247
11248
11249/**
11250 * Configures VM-exit controls for current DBGF and DTrace settings.
11251 *
11252 * This updates @a pDbgState and the VMCS execution control fields to reflect
11253 * the necessary VM-exits demanded by DBGF and DTrace.
11254 *
11255 * @param pVCpu The cross context virtual CPU structure.
11256 * @param pVmxTransient The VMX-transient structure. May update
11257 * fUpdatedTscOffsettingAndPreemptTimer.
11258 * @param pDbgState The debug state.
11259 */
11260static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11261{
11262#ifndef IN_NEM_DARWIN
11263 /*
11264 * Record the DTrace settings sequence number so we can spot changes later.
11265 */
11266 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11267 ASMCompilerBarrier();
11268#endif
11269
11270 /*
11271 * We'll rebuild most of the middle block of data members (holding the
11272 * current settings) as we go along here, so start by clearing it all.
11273 */
11274 pDbgState->bmXcptExtra = 0;
11275 pDbgState->fCpe1Extra = 0;
11276 pDbgState->fCpe1Unwanted = 0;
11277 pDbgState->fCpe2Extra = 0;
11278 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11279 pDbgState->bmExitsToCheck[i] = 0;
11280
11281 /*
11282 * Software interrupts (INT XXh) - no idea how to trigger these...
11283 */
11284 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11285 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11286 || VBOXVMM_INT_SOFTWARE_ENABLED())
11287 {
11288 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11289 }
11290
11291 /*
11292 * INT3 breakpoints - triggered by #BP exceptions.
11293 */
11294 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11295 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11296
11297 /*
11298 * Exception bitmap and XCPT events+probes.
11299 */
11300 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11301 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11302 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11303
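    /* Exceptions for which a DTrace probe is individually armed. */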
11304 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11305 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11306 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11307 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11308 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11309 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11310 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11311 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11312 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11313 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11314 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11315 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11316 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11317 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11318 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11319 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11320 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11321 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11322
11323 if (pDbgState->bmXcptExtra)
11324 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11325
11326 /*
11327 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11328 *
11329 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11330 * So, when adding/changing/removing please don't forget to update it.
11331 *
11332 * Some of the macros pick up local variables to save horizontal space
11333 * (being able to see it all in a table is the lesser evil here).
11334 */
11335#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11336 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11337 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11338#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11339 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11340 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11341 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11342 } else do { } while (0)
11343#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11344 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11345 { \
11346 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11347 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11348 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11349 } else do { } while (0)
11350#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11351 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11352 { \
11353 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11354 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11355 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11356 } else do { } while (0)
11357#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11358 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11359 { \
11360 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11361 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11362 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11363 } else do { } while (0)
11364
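    /* For example, SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT)
       adds VMX_PROC_CTLS_RDTSC_EXIT to fCpe1Extra and marks VMX_EXIT_RDTSC in bmExitsToCheck
       whenever the DBGFEVENT_INSTR_RDTSC event or the VBOXVMM_INSTR_RDTSC DTrace probe is enabled. */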
11365 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11366 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11367 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11368 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11369 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11370
11371 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11372 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11373 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11374 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11375 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11376 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11377 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11378 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11379 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11380 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11381 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11382 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11383 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11384 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11385 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11386 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11387 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11388 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11389 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11390 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11391 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11392 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11393 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11394 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11395 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11396 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11397 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11398 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11399 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11400 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11401 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11402 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11403 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11404 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11405 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11406 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11407
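    /* Control-register accesses: import CR0/CR4/TPR and request the CR3/CR8 load
       and store exits needed to observe the reads and writes. */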
11408 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11409 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11410 {
11411 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11412 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11413 AssertRC(rc);
11414
11415#if 0 /** @todo fix me */
11416 pDbgState->fClearCr0Mask = true;
11417 pDbgState->fClearCr4Mask = true;
11418#endif
11419 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11420 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11421 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11422 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11423 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11424 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11425 require clearing here and in the loop if we start using it. */
11426 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11427 }
11428 else
11429 {
11430 if (pDbgState->fClearCr0Mask)
11431 {
11432 pDbgState->fClearCr0Mask = false;
11433 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11434 }
11435 if (pDbgState->fClearCr4Mask)
11436 {
11437 pDbgState->fClearCr4Mask = false;
11438 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11439 }
11440 }
11441 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11442 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11443
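    /* Debug-register accesses: only flag the MOV-DRX exit for checking here. */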
11444 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11445 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11446 {
11447 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11448 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11449 }
11450 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11451 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11452
11453 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11454 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11455 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11456 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11457 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11458 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11459 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11460 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11461#if 0 /** @todo too slow, fix handler. */
11462 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11463#endif
11464 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11465
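    /* GDTR/IDTR and LDTR/TR accesses both depend on the descriptor-table exiting
       control in the secondary processor-based controls. */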
11466 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11467 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11468 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11469 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11470 {
11471 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11472 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11473 }
11474 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11475 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11476 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11477 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11478
11479 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11480 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11481 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11482 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11483 {
11484 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11485 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11486 }
11487 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11488 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11489 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11490 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11491
11492 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11493 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11494 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11495 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11496 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11497 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11498 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11499 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11500 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11501 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11502 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11503 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11504 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11505 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11506 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11507 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11508 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11509 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11510 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11511 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
11512 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11513 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11514
11515#undef IS_EITHER_ENABLED
11516#undef SET_ONLY_XBM_IF_EITHER_EN
11517#undef SET_CPE1_XBM_IF_EITHER_EN
11518#undef SET_CPEU_XBM_IF_EITHER_EN
11519#undef SET_CPE2_XBM_IF_EITHER_EN
11520
11521 /*
11522 * Sanitize the requested execution controls against what the CPU allows.
11523 */
11524 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11525 if (pDbgState->fCpe2Extra)
11526 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11527 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11528 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
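    /* Keep the RDTSC-exiting preference in sync and force the TSC offsetting and
       preemption timer setup to be redone whenever it flips. */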
11529#ifndef IN_NEM_DARWIN
11530 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11531 {
11532 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11533 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11534 }
11535#else
11536 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11537 {
11538 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11539 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11540 }
11541#endif
11542
11543 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11544 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11545 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11546 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11547}
11548
11549
11550/**
11551 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11552 * appropriate.
11553 *
11554 * The caller has checked the VM-exit against the
11555 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
11556 * so we don't have to do either of those here.
11557 *
11558 * @returns Strict VBox status code (i.e. informational status codes too).
11559 * @param pVCpu The cross context virtual CPU structure.
11560 * @param pVmxTransient The VMX-transient structure.
11561 * @param uExitReason The VM-exit reason.
11562 *
11563 * @remarks The name of this function is displayed by dtrace, so keep it short
11564 * and to the point. No longer than 33 chars, please.
11565 */
11566static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11567{
11568 /*
11569 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11570 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11571 *
11572 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11573 * does. Must add/change/remove both places. Same ordering, please.
11574 *
11575 * Added/removed events must also be reflected in the next section
11576 * where we dispatch dtrace events.
11577 */
11578 bool fDtrace1 = false;
11579 bool fDtrace2 = false;
11580 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11581 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11582 uint32_t uEventArg = 0;
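    /* SET_EXIT arms only the VM-exit event/probe, SET_BOTH arms the corresponding
       instruction event/probe as well.  E.g. SET_BOTH(CPUID) selects DBGFEVENT_INSTR_CPUID
       and DBGFEVENT_EXIT_CPUID and checks VBOXVMM_INSTR_CPUID_ENABLED() and
       VBOXVMM_EXIT_CPUID_ENABLED(). */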
11583#define SET_EXIT(a_EventSubName) \
11584 do { \
11585 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11586 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11587 } while (0)
11588#define SET_BOTH(a_EventSubName) \
11589 do { \
11590 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11591 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11592 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11593 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11594 } while (0)
11595 switch (uExitReason)
11596 {
11597 case VMX_EXIT_MTF:
11598 return vmxHCExitMtf(pVCpu, pVmxTransient);
11599
11600 case VMX_EXIT_XCPT_OR_NMI:
11601 {
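            /* Map hardware/software exceptions to the matching DBGF exception event
               and software interrupts to the generic software-interrupt event. */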
11602 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11603 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11604 {
11605 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11606 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11607 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11608 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11609 {
11610 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11611 {
11612 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11613 uEventArg = pVmxTransient->uExitIntErrorCode;
11614 }
11615 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11616 switch (enmEvent1)
11617 {
11618 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11619 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11620 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11621 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11622 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11623 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11624 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11625 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11626 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11627 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11628 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11629 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11630 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11631 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11632 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11633 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11634 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11635 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11636 default: break;
11637 }
11638 }
11639 else
11640 AssertFailed();
11641 break;
11642
11643 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11644 uEventArg = idxVector;
11645 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11646 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11647 break;
11648 }
11649 break;
11650 }
11651
11652 case VMX_EXIT_TRIPLE_FAULT:
11653 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11654 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11655 break;
11656 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11657 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11658 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11659 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11660 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11661
11662 /* Instruction specific VM-exits: */
11663 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11664 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11665 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11666 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11667 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11668 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11669 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11670 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11671 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11672 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11673 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11674 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11675 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11676 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11677 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11678 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11679 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11680 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11681 case VMX_EXIT_MOV_CRX:
11682 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11683 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11684 SET_BOTH(CRX_READ);
11685 else
11686 SET_BOTH(CRX_WRITE);
11687 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11688 break;
11689 case VMX_EXIT_MOV_DRX:
11690 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11691 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11692 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11693 SET_BOTH(DRX_READ);
11694 else
11695 SET_BOTH(DRX_WRITE);
11696 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11697 break;
11698 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11699 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11700 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11701 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11702 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11703 case VMX_EXIT_GDTR_IDTR_ACCESS:
11704 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11705 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11706 {
11707 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11708 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11709 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11710 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11711 }
11712 break;
11713
11714 case VMX_EXIT_LDTR_TR_ACCESS:
11715 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11716 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11717 {
11718 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11719 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11720 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11721 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11722 }
11723 break;
11724
11725 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11726 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11727 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11728 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11729 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11730 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11731 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11732 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11733 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11734 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11735 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11736
11737 /* Events that aren't relevant at this point. */
11738 case VMX_EXIT_EXT_INT:
11739 case VMX_EXIT_INT_WINDOW:
11740 case VMX_EXIT_NMI_WINDOW:
11741 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11742 case VMX_EXIT_PREEMPT_TIMER:
11743 case VMX_EXIT_IO_INSTR:
11744 break;
11745
11746 /* Errors and unexpected events. */
11747 case VMX_EXIT_INIT_SIGNAL:
11748 case VMX_EXIT_SIPI:
11749 case VMX_EXIT_IO_SMI:
11750 case VMX_EXIT_SMI:
11751 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11752 case VMX_EXIT_ERR_MSR_LOAD:
11753 case VMX_EXIT_ERR_MACHINE_CHECK:
11754 case VMX_EXIT_PML_FULL:
11755 case VMX_EXIT_VIRTUALIZED_EOI:
11756 break;
11757
11758 default:
11759 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11760 break;
11761 }
11762#undef SET_BOTH
11763#undef SET_EXIT
11764
11765 /*
11766 * Dtrace tracepoints go first. We do them all here so we don't
11767 * have to duplicate the guest-state import code a few dozen times.
11768 * The downside is that we've got to repeat the switch, though this time
11769 * we use enmEvent since the probes are a subset of what DBGF does.
11770 */
11771 if (fDtrace1 || fDtrace2)
11772 {
11773 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11774 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11775 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; RT_NOREF(pCtx); /* Shut up Clang 13. */
11776 switch (enmEvent1)
11777 {
11778 /** @todo consider which extra parameters would be helpful for each probe. */
11779 case DBGFEVENT_END: break;
11780 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11781 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11782 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11783 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11784 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11785 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11786 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11787 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11788 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11789 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11790 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11791 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11792 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11793 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11794 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11795 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11796 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11797 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11798 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11799 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11800 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11801 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11802 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11803 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11804 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11805 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11806 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11807 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11808 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11809 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11810 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11811 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11812 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11813 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11814 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11815 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11816 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11817 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11818 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11819 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11820 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11821 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11822 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11823 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11824 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11825 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11826 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11827 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11828 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11829 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11830 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11831 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11832 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11833 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11834 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11835 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11836 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11837 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11838 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11839 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11840 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11841 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11842 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11843 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11844 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11845 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11846 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11847 }
11848 switch (enmEvent2)
11849 {
11850 /** @todo consider which extra parameters would be helpful for each probe. */
11851 case DBGFEVENT_END: break;
11852 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11853 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11854 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11855 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11856 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11857 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11858 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11859 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11860 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11861 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11862 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11863 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11864 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11865 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11866 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11867 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11868 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11869 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11870 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11871 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11872 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11873 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11874 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11875 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11876 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11877 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11878 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11879 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11880 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11881 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11882 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11883 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11884 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11885 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11886 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11887 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11888 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11889 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11890 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11891 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11892 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11893 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11894 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11895 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11896 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11897 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11898 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11899 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11900 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11901 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11902 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11903 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11904 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11905 }
11906 }
11907
11908 /*
11909 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11910 * the DBGF call will do a full check).
11911 *
11912 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11913 * Note! If we have two events, we prioritize the first, i.e. the instruction
11914 * one, in order to avoid event nesting.
11915 */
11916 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11917 if ( enmEvent1 != DBGFEVENT_END
11918 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11919 {
11920 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11921 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11922 if (rcStrict != VINF_SUCCESS)
11923 return rcStrict;
11924 }
11925 else if ( enmEvent2 != DBGFEVENT_END
11926 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11927 {
11928 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11929 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11930 if (rcStrict != VINF_SUCCESS)
11931 return rcStrict;
11932 }
11933
11934 return VINF_SUCCESS;
11935}
11936
11937
11938/**
11939 * Single-stepping VM-exit filtering.
11940 *
11941 * This is preprocessing the VM-exits and deciding whether we've gotten far
11942 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11943 * handling is performed.
11944 *
11945 * @returns Strict VBox status code (i.e. informational status codes too).
11946 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11947 * @param pVmxTransient The VMX-transient structure.
11948 * @param pDbgState The debug state.
11949 */
11950DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11951{
11952 /*
11953 * Expensive (saves context) generic dtrace VM-exit probe.
11954 */
11955 uint32_t const uExitReason = pVmxTransient->uExitReason;
11956 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11957 { /* more likely */ }
11958 else
11959 {
11960 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11961 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11962 AssertRC(rc);
11963 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11964 }
11965
11966#ifndef IN_NEM_DARWIN
11967 /*
11968 * Check for host NMI, just to get that out of the way.
11969 */
11970 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11971 { /* normally likely */ }
11972 else
11973 {
11974 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11975 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11976 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11977 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11978 }
11979#endif
11980
11981 /*
11982 * Check for single stepping event if we're stepping.
11983 */
11984 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11985 {
11986 switch (uExitReason)
11987 {
11988 case VMX_EXIT_MTF:
11989 return vmxHCExitMtf(pVCpu, pVmxTransient);
11990
11991 /* Various events: */
11992 case VMX_EXIT_XCPT_OR_NMI:
11993 case VMX_EXIT_EXT_INT:
11994 case VMX_EXIT_TRIPLE_FAULT:
11995 case VMX_EXIT_INT_WINDOW:
11996 case VMX_EXIT_NMI_WINDOW:
11997 case VMX_EXIT_TASK_SWITCH:
11998 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11999 case VMX_EXIT_APIC_ACCESS:
12000 case VMX_EXIT_EPT_VIOLATION:
12001 case VMX_EXIT_EPT_MISCONFIG:
12002 case VMX_EXIT_PREEMPT_TIMER:
12003
12004 /* Instruction specific VM-exits: */
12005 case VMX_EXIT_CPUID:
12006 case VMX_EXIT_GETSEC:
12007 case VMX_EXIT_HLT:
12008 case VMX_EXIT_INVD:
12009 case VMX_EXIT_INVLPG:
12010 case VMX_EXIT_RDPMC:
12011 case VMX_EXIT_RDTSC:
12012 case VMX_EXIT_RSM:
12013 case VMX_EXIT_VMCALL:
12014 case VMX_EXIT_VMCLEAR:
12015 case VMX_EXIT_VMLAUNCH:
12016 case VMX_EXIT_VMPTRLD:
12017 case VMX_EXIT_VMPTRST:
12018 case VMX_EXIT_VMREAD:
12019 case VMX_EXIT_VMRESUME:
12020 case VMX_EXIT_VMWRITE:
12021 case VMX_EXIT_VMXOFF:
12022 case VMX_EXIT_VMXON:
12023 case VMX_EXIT_MOV_CRX:
12024 case VMX_EXIT_MOV_DRX:
12025 case VMX_EXIT_IO_INSTR:
12026 case VMX_EXIT_RDMSR:
12027 case VMX_EXIT_WRMSR:
12028 case VMX_EXIT_MWAIT:
12029 case VMX_EXIT_MONITOR:
12030 case VMX_EXIT_PAUSE:
12031 case VMX_EXIT_GDTR_IDTR_ACCESS:
12032 case VMX_EXIT_LDTR_TR_ACCESS:
12033 case VMX_EXIT_INVEPT:
12034 case VMX_EXIT_RDTSCP:
12035 case VMX_EXIT_INVVPID:
12036 case VMX_EXIT_WBINVD:
12037 case VMX_EXIT_XSETBV:
12038 case VMX_EXIT_RDRAND:
12039 case VMX_EXIT_INVPCID:
12040 case VMX_EXIT_VMFUNC:
12041 case VMX_EXIT_RDSEED:
12042 case VMX_EXIT_XSAVES:
12043 case VMX_EXIT_XRSTORS:
12044 {
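                /* For all of the above exits, return VINF_EM_DBG_STEPPED as soon as
                   RIP or CS differs from where the single step started. */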
12045 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
12046 AssertRCReturn(rc, rc);
12047 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12048 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12049 return VINF_EM_DBG_STEPPED;
12050 break;
12051 }
12052
12053 /* Errors and unexpected events: */
12054 case VMX_EXIT_INIT_SIGNAL:
12055 case VMX_EXIT_SIPI:
12056 case VMX_EXIT_IO_SMI:
12057 case VMX_EXIT_SMI:
12058 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12059 case VMX_EXIT_ERR_MSR_LOAD:
12060 case VMX_EXIT_ERR_MACHINE_CHECK:
12061 case VMX_EXIT_PML_FULL:
12062 case VMX_EXIT_VIRTUALIZED_EOI:
12063 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12064 break;
12065
12066 default:
12067 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12068 break;
12069 }
12070 }
12071
12072 /*
12073 * Check for debugger event breakpoints and dtrace probes.
12074 */
12075 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12076 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12077 {
12078 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12079 if (rcStrict != VINF_SUCCESS)
12080 return rcStrict;
12081 }
12082
12083 /*
12084 * Normal processing.
12085 */
12086#ifdef HMVMX_USE_FUNCTION_TABLE
12087 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12088#else
12089 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
12090#endif
12091}
12092
12093/** @} */