VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 98505

Last change on this file since 98505 was 98505, checked in by vboxsync, 2 years ago

VMM: Nested VMX: bugref:10318 Build fix, unused parameter on Darwin, remove it.

1/* $Id: VMXAllTemplate.cpp.h 98505 2023-02-08 14:53:57Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Defined Constants And Macros *
31*********************************************************************************************************************************/
32#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
33# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
34#endif
35
36
37#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
38# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
39#endif
40
41/** Enables condensing of VMREAD instructions, see vmxHCReadToTransient(). */
42#define HMVMX_WITH_CONDENSED_VMREADS
43
44/** Use the function table. */
45#define HMVMX_USE_FUNCTION_TABLE
46
47/** Determine which tagged-TLB flush handler to use. */
48#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
49#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
50#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
51#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
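// How one of the four values above is typically selected, based on which features the
// CPU offers (a hedged sketch only; fEpt, fVpid and the variable name are illustrative,
// the real selection logic lives in the setup code outside this excerpt):
//
//     uint32_t uFlushTaggedTlb;
//     if (fEpt && fVpid)
//         uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
//     else if (fEpt)
//         uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
//     else if (fVpid)
//         uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
//     else
//         uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;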
52
53/** Assert that all the given fields have been read from the VMCS. */
54#ifdef VBOX_STRICT
55# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
56 do { \
57 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
58 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
59 } while (0)
60#else
61# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
62#endif
63
64/**
65 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
66 * guest using hardware-assisted VMX.
67 *
68 * This excludes state like GPRs (other than RSP) which are always
69 * swapped and restored across the world-switch, and also registers like the
70 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
71 */
72#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
73 | CPUMCTX_EXTRN_RFLAGS \
74 | CPUMCTX_EXTRN_RSP \
75 | CPUMCTX_EXTRN_SREG_MASK \
76 | CPUMCTX_EXTRN_TABLE_MASK \
77 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
78 | CPUMCTX_EXTRN_SYSCALL_MSRS \
79 | CPUMCTX_EXTRN_SYSENTER_MSRS \
80 | CPUMCTX_EXTRN_TSC_AUX \
81 | CPUMCTX_EXTRN_OTHER_MSRS \
82 | CPUMCTX_EXTRN_CR0 \
83 | CPUMCTX_EXTRN_CR3 \
84 | CPUMCTX_EXTRN_CR4 \
85 | CPUMCTX_EXTRN_DR7 \
86 | CPUMCTX_EXTRN_HWVIRT \
87 | CPUMCTX_EXTRN_INHIBIT_INT \
88 | CPUMCTX_EXTRN_INHIBIT_NMI)
89
90/**
91 * Exception bitmap mask for real-mode guests (real-on-v86).
92 *
93 * We need to intercept all exceptions manually except:
94 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU
95 * from deadlocking due to bugs in Intel CPUs.
96 * - \#PF, which need not be intercepted even in real-mode if we have nested
97 * paging support.
98 */
99#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
100 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
101 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
102 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
103 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
104 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
105 | RT_BIT(X86_XCPT_XF))
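// When the guest is executed in real-on-v86 mode, the mask above is simply OR'ed into
// the exception bitmap that gets committed via VMX_VMCS32_CTRL_EXCEPTION_BITMAP (a
// sketch; uXcptBitmap stands for the bitmap being built):
//
//     uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;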
106
107/** Maximum VM-instruction error number. */
108#define HMVMX_INSTR_ERROR_MAX 28
109
110/** Profiling macro. */
111#ifdef HM_PROFILE_EXIT_DISPATCH
112# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
113# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
114#else
115# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
116# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
117#endif
118
119#ifndef IN_NEM_DARWIN
120/** Assert that preemption is disabled or covered by thread-context hooks. */
121# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
122 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
123
124/** Assert that we haven't migrated CPUs when thread-context hooks are not
125 * used. */
126# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
127 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
128 ("Illegal migration! Entered on CPU %u Current %u\n", \
129 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
130#else
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
132# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
133#endif
134
135/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
136 * context. */
137#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
138 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
139 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
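// Typical use together with HMVMX_CPUMCTX_EXTRN_ALL defined above, e.g. before switching
// VMCSes when the complete guest context must already have been imported (this exact
// pattern appears later in this file):
//
//     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);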
140
141/** Log the VM-exit reason with an easily visible marker to identify it in a
142 * potential sea of logging data. */
143#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
144 do { \
145 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
146 HMGetVmxExitName(a_uExitReason))); \
147 } while (0) \
148
149
150/*********************************************************************************************************************************
151* Structures and Typedefs *
152*********************************************************************************************************************************/
153/**
154 * Memory operand read or write access.
155 */
156typedef enum VMXMEMACCESS
157{
158 VMXMEMACCESS_READ = 0,
159 VMXMEMACCESS_WRITE = 1
160} VMXMEMACCESS;
161
162
163/**
164 * VMX VM-exit handler.
165 *
166 * @returns Strict VBox status code (i.e. informational status codes too).
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pVmxTransient The VMX-transient structure.
169 */
170#ifndef HMVMX_USE_FUNCTION_TABLE
171typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
172#else
173typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
174/** Pointer to VM-exit handler. */
175typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
176#endif
177
178/**
179 * VMX VM-exit handler, non-strict status code.
180 *
181 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
182 *
183 * @returns VBox status code, no informational status code returned.
184 * @param pVCpu The cross context virtual CPU structure.
185 * @param pVmxTransient The VMX-transient structure.
186 *
187 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
188 * use of that status code will be replaced with VINF_EM_SOMETHING
189 * later when switching over to IEM.
190 */
191#ifndef HMVMX_USE_FUNCTION_TABLE
192typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
193#else
194typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
195#endif
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201#ifndef HMVMX_USE_FUNCTION_TABLE
202DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
204# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
205#else
206# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
207# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
208#endif
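// Shape of a VM-exit handler built on the typedefs and macros above (an illustrative
// skeleton only; vmxHCExitSomething is a made-up name, the real handlers are defined
// further down in this file):
//
//     HMVMX_EXIT_DECL vmxHCExitSomething(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
//     {
//         HMVMX_LOG_EXIT(pVCpu, pVmxTransient->uExitReason);
//         vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
//         /* ... handle the exit, advancing RIP by pVmxTransient->cbExitInstr ... */
//         return VINF_SUCCESS;
//     }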
209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
210DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
211#endif
212
213static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
214
215/** @name VM-exit handler prototypes.
216 * @{
217 */
218static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
219static FNVMXEXITHANDLER vmxHCExitExtInt;
220static FNVMXEXITHANDLER vmxHCExitTripleFault;
221static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
222static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
223static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
224static FNVMXEXITHANDLER vmxHCExitCpuid;
225static FNVMXEXITHANDLER vmxHCExitGetsec;
226static FNVMXEXITHANDLER vmxHCExitHlt;
227static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
228static FNVMXEXITHANDLER vmxHCExitInvlpg;
229static FNVMXEXITHANDLER vmxHCExitRdpmc;
230static FNVMXEXITHANDLER vmxHCExitVmcall;
231#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
232static FNVMXEXITHANDLER vmxHCExitVmclear;
233static FNVMXEXITHANDLER vmxHCExitVmlaunch;
234static FNVMXEXITHANDLER vmxHCExitVmptrld;
235static FNVMXEXITHANDLER vmxHCExitVmptrst;
236static FNVMXEXITHANDLER vmxHCExitVmread;
237static FNVMXEXITHANDLER vmxHCExitVmresume;
238static FNVMXEXITHANDLER vmxHCExitVmwrite;
239static FNVMXEXITHANDLER vmxHCExitVmxoff;
240static FNVMXEXITHANDLER vmxHCExitVmxon;
241static FNVMXEXITHANDLER vmxHCExitInvvpid;
242# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
243static FNVMXEXITHANDLER vmxHCExitInvept;
244# endif
245#endif
246static FNVMXEXITHANDLER vmxHCExitRdtsc;
247static FNVMXEXITHANDLER vmxHCExitMovCRx;
248static FNVMXEXITHANDLER vmxHCExitMovDRx;
249static FNVMXEXITHANDLER vmxHCExitIoInstr;
250static FNVMXEXITHANDLER vmxHCExitRdmsr;
251static FNVMXEXITHANDLER vmxHCExitWrmsr;
252static FNVMXEXITHANDLER vmxHCExitMwait;
253static FNVMXEXITHANDLER vmxHCExitMtf;
254static FNVMXEXITHANDLER vmxHCExitMonitor;
255static FNVMXEXITHANDLER vmxHCExitPause;
256static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
257static FNVMXEXITHANDLER vmxHCExitApicAccess;
258static FNVMXEXITHANDLER vmxHCExitEptViolation;
259static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
260static FNVMXEXITHANDLER vmxHCExitRdtscp;
261static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
262static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
263static FNVMXEXITHANDLER vmxHCExitXsetbv;
264static FNVMXEXITHANDLER vmxHCExitInvpcid;
265#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
266static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
267#endif
268static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
269static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
270/** @} */
271
272#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
273/** @name Nested-guest VM-exit handler prototypes.
274 * @{
275 */
276static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
277static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
278static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
279static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
280static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
281static FNVMXEXITHANDLER vmxHCExitHltNested;
282static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
283static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
284static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
285static FNVMXEXITHANDLER vmxHCExitRdtscNested;
286static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
287static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
288static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
289static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
290static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
291static FNVMXEXITHANDLER vmxHCExitMwaitNested;
292static FNVMXEXITHANDLER vmxHCExitMtfNested;
293static FNVMXEXITHANDLER vmxHCExitMonitorNested;
294static FNVMXEXITHANDLER vmxHCExitPauseNested;
295static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
296static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
297static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
298static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
299static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
301static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
302static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
303static FNVMXEXITHANDLER vmxHCExitInstrNested;
304static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
305# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
306static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
307static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
308# endif
309/** @} */
310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
311
312
313/*********************************************************************************************************************************
314* Global Variables *
315*********************************************************************************************************************************/
316#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
317/**
318 * Array of all VMCS fields.
319 * Any fields added to the VT-x spec. should be added here.
320 *
321 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
322 * of nested-guests.
323 */
324static const uint32_t g_aVmcsFields[] =
325{
326 /* 16-bit control fields. */
327 VMX_VMCS16_VPID,
328 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
329 VMX_VMCS16_EPTP_INDEX,
330 VMX_VMCS16_HLAT_PREFIX_SIZE,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_FULL,
411 VMX_VMCS64_CTRL_PCONFIG_EXITING_BITMAP_HIGH,
412 VMX_VMCS64_CTRL_HLAT_PTR_FULL,
413 VMX_VMCS64_CTRL_HLAT_PTR_HIGH,
414 VMX_VMCS64_CTRL_EXIT2_FULL,
415 VMX_VMCS64_CTRL_EXIT2_HIGH,
416
417 /* 64-bit read-only data fields. */
418 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
420
421 /* 64-bit guest-state fields. */
422 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
424 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
425 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
426 VMX_VMCS64_GUEST_PAT_FULL,
427 VMX_VMCS64_GUEST_PAT_HIGH,
428 VMX_VMCS64_GUEST_EFER_FULL,
429 VMX_VMCS64_GUEST_EFER_HIGH,
430 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
432 VMX_VMCS64_GUEST_PDPTE0_FULL,
433 VMX_VMCS64_GUEST_PDPTE0_HIGH,
434 VMX_VMCS64_GUEST_PDPTE1_FULL,
435 VMX_VMCS64_GUEST_PDPTE1_HIGH,
436 VMX_VMCS64_GUEST_PDPTE2_FULL,
437 VMX_VMCS64_GUEST_PDPTE2_HIGH,
438 VMX_VMCS64_GUEST_PDPTE3_FULL,
439 VMX_VMCS64_GUEST_PDPTE3_HIGH,
440 VMX_VMCS64_GUEST_BNDCFGS_FULL,
441 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
442 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
443 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
444 VMX_VMCS64_GUEST_PKRS_FULL,
445 VMX_VMCS64_GUEST_PKRS_HIGH,
446
447 /* 64-bit host-state fields. */
448 VMX_VMCS64_HOST_PAT_FULL,
449 VMX_VMCS64_HOST_PAT_HIGH,
450 VMX_VMCS64_HOST_EFER_FULL,
451 VMX_VMCS64_HOST_EFER_HIGH,
452 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
454 VMX_VMCS64_HOST_PKRS_FULL,
455 VMX_VMCS64_HOST_PKRS_HIGH,
456
457 /* 32-bit control fields. */
458 VMX_VMCS32_CTRL_PIN_EXEC,
459 VMX_VMCS32_CTRL_PROC_EXEC,
460 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
461 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
463 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
464 VMX_VMCS32_CTRL_EXIT,
465 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
466 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
467 VMX_VMCS32_CTRL_ENTRY,
468 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
469 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
470 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
471 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
472 VMX_VMCS32_CTRL_TPR_THRESHOLD,
473 VMX_VMCS32_CTRL_PROC_EXEC2,
474 VMX_VMCS32_CTRL_PLE_GAP,
475 VMX_VMCS32_CTRL_PLE_WINDOW,
476
477 /* 32-bit read-only fields. */
478 VMX_VMCS32_RO_VM_INSTR_ERROR,
479 VMX_VMCS32_RO_EXIT_REASON,
480 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
482 VMX_VMCS32_RO_IDT_VECTORING_INFO,
483 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
484 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
485 VMX_VMCS32_RO_EXIT_INSTR_INFO,
486
487 /* 32-bit guest-state fields. */
488 VMX_VMCS32_GUEST_ES_LIMIT,
489 VMX_VMCS32_GUEST_CS_LIMIT,
490 VMX_VMCS32_GUEST_SS_LIMIT,
491 VMX_VMCS32_GUEST_DS_LIMIT,
492 VMX_VMCS32_GUEST_FS_LIMIT,
493 VMX_VMCS32_GUEST_GS_LIMIT,
494 VMX_VMCS32_GUEST_LDTR_LIMIT,
495 VMX_VMCS32_GUEST_TR_LIMIT,
496 VMX_VMCS32_GUEST_GDTR_LIMIT,
497 VMX_VMCS32_GUEST_IDTR_LIMIT,
498 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_INT_STATE,
507 VMX_VMCS32_GUEST_ACTIVITY_STATE,
508 VMX_VMCS32_GUEST_SMBASE,
509 VMX_VMCS32_GUEST_SYSENTER_CS,
510 VMX_VMCS32_PREEMPT_TIMER_VALUE,
511
512 /* 32-bit host-state fields. */
513 VMX_VMCS32_HOST_SYSENTER_CS,
514
515 /* Natural-width control fields. */
516 VMX_VMCS_CTRL_CR0_MASK,
517 VMX_VMCS_CTRL_CR4_MASK,
518 VMX_VMCS_CTRL_CR0_READ_SHADOW,
519 VMX_VMCS_CTRL_CR4_READ_SHADOW,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
524
525 /* Natural-width read-only data fields. */
526 VMX_VMCS_RO_EXIT_QUALIFICATION,
527 VMX_VMCS_RO_IO_RCX,
528 VMX_VMCS_RO_IO_RSI,
529 VMX_VMCS_RO_IO_RDI,
530 VMX_VMCS_RO_IO_RIP,
531 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
532
533 /* Natural-width guest-state fields. */
534 VMX_VMCS_GUEST_CR0,
535 VMX_VMCS_GUEST_CR3,
536 VMX_VMCS_GUEST_CR4,
537 VMX_VMCS_GUEST_ES_BASE,
538 VMX_VMCS_GUEST_CS_BASE,
539 VMX_VMCS_GUEST_SS_BASE,
540 VMX_VMCS_GUEST_DS_BASE,
541 VMX_VMCS_GUEST_FS_BASE,
542 VMX_VMCS_GUEST_GS_BASE,
543 VMX_VMCS_GUEST_LDTR_BASE,
544 VMX_VMCS_GUEST_TR_BASE,
545 VMX_VMCS_GUEST_GDTR_BASE,
546 VMX_VMCS_GUEST_IDTR_BASE,
547 VMX_VMCS_GUEST_DR7,
548 VMX_VMCS_GUEST_RSP,
549 VMX_VMCS_GUEST_RIP,
550 VMX_VMCS_GUEST_RFLAGS,
551 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
552 VMX_VMCS_GUEST_SYSENTER_ESP,
553 VMX_VMCS_GUEST_SYSENTER_EIP,
554 VMX_VMCS_GUEST_S_CET,
555 VMX_VMCS_GUEST_SSP,
556 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
557
558 /* Natural-width host-state fields */
559 VMX_VMCS_HOST_CR0,
560 VMX_VMCS_HOST_CR3,
561 VMX_VMCS_HOST_CR4,
562 VMX_VMCS_HOST_FS_BASE,
563 VMX_VMCS_HOST_GS_BASE,
564 VMX_VMCS_HOST_TR_BASE,
565 VMX_VMCS_HOST_GDTR_BASE,
566 VMX_VMCS_HOST_IDTR_BASE,
567 VMX_VMCS_HOST_SYSENTER_ESP,
568 VMX_VMCS_HOST_SYSENTER_EIP,
569 VMX_VMCS_HOST_RSP,
570 VMX_VMCS_HOST_RIP,
571 VMX_VMCS_HOST_S_CET,
572 VMX_VMCS_HOST_SSP,
573 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
574};
575#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
576
577#ifdef HMVMX_USE_FUNCTION_TABLE
578/**
579 * VMX_EXIT dispatch table.
580 */
581static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
582{
583 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
584 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
585 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
586 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
587 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
588 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
589 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
590 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
591 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
592 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
593 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
594 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
595 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
596 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
597 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
598 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
599 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
600 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
601 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
603 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
604 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
605 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
606 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
607 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
608 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
609 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
610 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
611 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
612#else
613 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
614 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
615 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
616 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
617 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
618 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
619 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
620 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
621 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
622#endif
623 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
624 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
625 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
626 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
627 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
628 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
629 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
630 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
631 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
632 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
633 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
634 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
635 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
636 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
637 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
638 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
639 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
640 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
641 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
642 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
643 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
644 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
646 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
647#else
648 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
649#endif
650 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
651 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
653 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
654#else
655 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
656#endif
657 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
658 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
659 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
660 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
661 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
662 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
663 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
664 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
665 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
666 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
667 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
668 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
669 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
670 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
671 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
672 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
673};
674#endif /* HMVMX_USE_FUNCTION_TABLE */
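// A minimal sketch of how a dispatch table like the one above is consumed (illustrative;
// the real dispatch code, including the non-table code path, lives elsewhere in this
// file and in the per-backend run loops):
//
//     uint32_t const uBasicExit = pVmxTransient->uExitReason & 0xffff;  /* basic exit reason */
//     Assert(uBasicExit <= VMX_EXIT_MAX);
//     VBOXSTRICTRC rcStrict = g_aVMExitHandlers[uBasicExit].pfn(pVCpu, pVmxTransient);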
675
676#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
677static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
678{
679 /* 0 */ "(Not Used)",
680 /* 1 */ "VMCALL executed in VMX root operation.",
681 /* 2 */ "VMCLEAR with invalid physical address.",
682 /* 3 */ "VMCLEAR with VMXON pointer.",
683 /* 4 */ "VMLAUNCH with non-clear VMCS.",
684 /* 5 */ "VMRESUME with non-launched VMCS.",
685 /* 6 */ "VMRESUME after VMXOFF.",
686 /* 7 */ "VM-entry with invalid control fields.",
687 /* 8 */ "VM-entry with invalid host state fields.",
688 /* 9 */ "VMPTRLD with invalid physical address.",
689 /* 10 */ "VMPTRLD with VMXON pointer.",
690 /* 11 */ "VMPTRLD with incorrect revision identifier.",
691 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
692 /* 13 */ "VMWRITE to read-only VMCS component.",
693 /* 14 */ "(Not Used)",
694 /* 15 */ "VMXON executed in VMX root operation.",
695 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
696 /* 17 */ "VM-entry with non-launched executive VMCS.",
697 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
698 /* 19 */ "VMCALL with non-clear VMCS.",
699 /* 20 */ "VMCALL with invalid VM-exit control fields.",
700 /* 21 */ "(Not Used)",
701 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
702 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
703 /* 24 */ "VMCALL with invalid SMM-monitor features.",
704 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
705 /* 26 */ "VM-entry with events blocked by MOV SS.",
706 /* 27 */ "(Not Used)",
707 /* 28 */ "Invalid operand to INVEPT/INVVPID."
708};
709#endif /* VBOX_STRICT && LOG_ENABLED */
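// Illustrative use of the string table above when diagnosing a failed VMLAUNCH/VMRESUME
// (a sketch; the VM-instruction error number is read from the VMX_VMCS32_RO_VM_INSTR_ERROR
// field listed in g_aVmcsFields above):
//
//     uint32_t uInstrError = 0;
//     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
//     AssertRC(rc);
//     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
//         Log(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));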
710
711
712/**
713 * Gets the CR0 guest/host mask.
714 *
715 * These bits typically do not change through the lifetime of a VM. Any bit set in
716 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
717 * by the guest.
718 *
719 * @returns The CR0 guest/host mask.
720 * @param pVCpu The cross context virtual CPU structure.
721 */
722static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
723{
724 /*
725 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
726 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
727 *
728 * Furthermore, modifications to any bits that are reserved/unspecified currently
729 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
730 * when future CPUs specify and use currently reserved/unspecified bits.
731 */
732 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
733 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
734 * and @bugref{6944}. */
735 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
736 AssertCompile(RT_HI_U32(VMX_EXIT_HOST_CR0_IGNORE_MASK) == UINT32_C(0xffffffff)); /* Paranoia. */
737 return ( X86_CR0_PE
738 | X86_CR0_NE
739 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
740 | X86_CR0_PG
741 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
742}
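// A hedged sketch of how the mask returned above is typically committed to the VMCS
// (u64Cr0Mask is assumed here to be the corresponding cache member of the VMCS info.
// object; the export code proper is further down in this file):
//
//     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
//     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
//     AssertRC(rc);
//     pVmcsInfo->u64Cr0Mask = fCr0Mask;   /* any guest write to a masked bit now causes a VM-exit */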
743
744
745/**
746 * Gets the CR4 guest/host mask.
747 *
748 * These bits typically do not change through the lifetime of a VM. Any bit set in
749 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
750 * by the guest.
751 *
752 * @returns The CR4 guest/host mask.
753 * @param pVCpu The cross context virtual CPU structure.
754 */
755static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
756{
757 /*
758 * We construct a mask of all CR4 bits that the guest can modify without causing
759 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
760 * a VM-exit when the guest attempts to modify them when executing using
761 * hardware-assisted VMX.
762 *
763 * When a feature is not exposed to the guest (and may be present on the host),
764 * we want to intercept guest modifications to the bit so we can emulate proper
765 * behavior (e.g., #GP).
766 *
767 * Furthermore, only modifications to those bits that don't require immediate
768 * emulation are allowed. For example, PCIDE is excluded because the behavior
769 * depends on CR3 which might not always be the guest value while executing
770 * using hardware-assisted VMX.
771 */
772 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
773 bool fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
774#ifdef IN_NEM_DARWIN
775 bool fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
776#endif
777 bool fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
778
779 /*
780 * Paranoia.
781 * Ensure features exposed to the guest are present on the host.
782 */
783 AssertStmt(!fFsGsBase || g_CpumHostFeatures.s.fFsGsBase, fFsGsBase = 0);
784#ifdef IN_NEM_DARWIN
785 AssertStmt(!fXSaveRstor || g_CpumHostFeatures.s.fXSaveRstor, fXSaveRstor = 0);
786#endif
787 AssertStmt(!fFxSaveRstor || g_CpumHostFeatures.s.fFxSaveRstor, fFxSaveRstor = 0);
788
789 uint64_t const fGstMask = X86_CR4_PVI
790 | X86_CR4_TSD
791 | X86_CR4_DE
792 | X86_CR4_MCE
793 | X86_CR4_PCE
794 | X86_CR4_OSXMMEEXCPT
795 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
796#ifdef IN_NEM_DARWIN /* On native VT-x setting OSXSAVE must exit as we need to load guest XCR0 (see
797 fLoadSaveGuestXcr0). These exits are not needed on Darwin as that's not our problem. */
798 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
799#endif
800 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0);
801 return ~fGstMask;
802}
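// As with CR0 above, the complement computed here is what typically ends up in the
// VMX_VMCS_CTRL_CR4_MASK field (a sketch, assuming the same wrapper and caching pattern):
//
//     uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
//     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
//     AssertRC(rc);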
803
804
805/**
806 * Adds one or more exceptions to the exception bitmap and commits it to the current
807 * VMCS.
808 *
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param pVmxTransient The VMX-transient structure.
811 * @param uXcptMask The exception(s) to add.
812 */
813static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
814{
815 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
816 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
817 if ((uXcptBitmap & uXcptMask) != uXcptMask)
818 {
819 uXcptBitmap |= uXcptMask;
820 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
821 AssertRC(rc);
822 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
823 }
824}
825
826
827/**
828 * Adds an exception to the exception bitmap and commits it to the current VMCS.
829 *
830 * @param pVCpu The cross context virtual CPU structure.
831 * @param pVmxTransient The VMX-transient structure.
832 * @param uXcpt The exception to add.
833 */
834static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
835{
836 Assert(uXcpt <= X86_XCPT_LAST);
837 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
838}
839
840
841/**
842 * Remove one or more exceptions from the exception bitmap and commits it to the
843 * current VMCS.
844 *
845 * This takes care of not removing the exception intercept if a nested-guest
846 * requires the exception to be intercepted.
847 *
848 * @returns VBox status code.
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param pVmxTransient The VMX-transient structure.
851 * @param uXcptMask The exception(s) to remove.
852 */
853static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
854{
855 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
856 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
857 if (uXcptBitmap & uXcptMask)
858 {
859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
860 if (!pVmxTransient->fIsNestedGuest)
861 { /* likely */ }
862 else
863 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
864#endif
865#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
866 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
867 | RT_BIT(X86_XCPT_DE)
868 | RT_BIT(X86_XCPT_NM)
869 | RT_BIT(X86_XCPT_TS)
870 | RT_BIT(X86_XCPT_UD)
871 | RT_BIT(X86_XCPT_NP)
872 | RT_BIT(X86_XCPT_SS)
873 | RT_BIT(X86_XCPT_GP)
874 | RT_BIT(X86_XCPT_PF)
875 | RT_BIT(X86_XCPT_MF));
876#elif defined(HMVMX_ALWAYS_TRAP_PF)
877 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
878#endif
879 if (uXcptMask)
880 {
881 /* Validate we are not removing any essential exception intercepts. */
882#ifndef IN_NEM_DARWIN
883 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
884#else
885 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
886#endif
887 NOREF(pVCpu);
888 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
889 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
890
891 /* Remove it from the exception bitmap. */
892 uXcptBitmap &= ~uXcptMask;
893
894 /* Commit and update the cache if necessary. */
895 if (pVmcsInfo->u32XcptBitmap != uXcptBitmap)
896 {
897 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
898 AssertRC(rc);
899 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
900 }
901 }
902 }
903 return VINF_SUCCESS;
904}
905
906
907/**
908 * Remove an exceptions from the exception bitmap and commits it to the current
909 * VMCS.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param pVmxTransient The VMX-transient structure.
914 * @param uXcpt The exception to remove.
915 */
916static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
917{
918 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
919}
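// Typical pairing of the add/remove helpers above, e.g. while temporarily trapping an
// exception (illustrative; X86_XCPT_GP chosen arbitrarily):
//
//     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
//     /* ... run the guest ... */
//     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
//     AssertRC(rc);   /* the intercept stays if a nested-guest still wants it */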
920
921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
922
923/**
924 * Loads the shadow VMCS specified by the VMCS info. object.
925 *
926 * @returns VBox status code.
927 * @param pVmcsInfo The VMCS info. object.
928 *
929 * @remarks Can be called with interrupts disabled.
930 */
931static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
932{
933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
934 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
935
936 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
937 if (RT_SUCCESS(rc))
938 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
939 return rc;
940}
941
942
943/**
944 * Clears the shadow VMCS specified by the VMCS info. object.
945 *
946 * @returns VBox status code.
947 * @param pVmcsInfo The VMCS info. object.
948 *
949 * @remarks Can be called with interrupts disabled.
950 */
951static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
952{
953 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
954 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
955
956 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
957 if (RT_SUCCESS(rc))
958 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
959 return rc;
960}
961
962
963/**
964 * Switches from and to the specified VMCSes.
965 *
966 * @returns VBox status code.
967 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
968 * @param pVmcsInfoTo The VMCS info. object we are switching to.
969 *
970 * @remarks Called with interrupts disabled.
971 */
972static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
973{
974 /*
975 * Clear the VMCS we are switching out if it has not already been cleared.
976 * This will sync any CPU internal data back to the VMCS.
977 */
978 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
979 {
980 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
981 if (RT_SUCCESS(rc))
982 {
983 /*
984 * The shadow VMCS, if any, would not be active at this point since we
985 * would have cleared it while importing the virtual hardware-virtualization
986 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
987 * clear the shadow VMCS here, just assert for safety.
988 */
989 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
990 }
991 else
992 return rc;
993 }
994
995 /*
996 * Clear the VMCS we are switching to if it has not already been cleared.
997 * This will initialize the VMCS launch state to "clear" required for loading it.
998 *
999 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1000 */
1001 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1002 {
1003 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1004 if (RT_SUCCESS(rc))
1005 { /* likely */ }
1006 else
1007 return rc;
1008 }
1009
1010 /*
1011 * Finally, load the VMCS we are switching to.
1012 */
1013 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1014}
1015
1016
1017/**
1018 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1019 * caller.
1020 *
1021 * @returns VBox status code.
1022 * @param pVCpu The cross context virtual CPU structure.
1023 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1024 * true) or guest VMCS (pass false).
1025 */
1026static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1027{
1028 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1029 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1030
1031 PVMXVMCSINFO pVmcsInfoFrom;
1032 PVMXVMCSINFO pVmcsInfoTo;
1033 if (fSwitchToNstGstVmcs)
1034 {
1035 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1036 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1037 }
1038 else
1039 {
1040 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1041 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1042 }
1043
1044 /*
1045 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1046 * preemption hook code path acquires the current VMCS.
1047 */
1048 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1049
1050 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1051 if (RT_SUCCESS(rc))
1052 {
1053 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1054 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1055
1056 /*
1057 * If we are switching to a VMCS that was executed on a different host CPU or was
1058 * never executed before, flag that we need to export the host state before executing
1059 * guest/nested-guest code using hardware-assisted VMX.
1060 *
1061 * This could probably be done in a preemptible context since the preemption hook
1062 * will flag the necessary change in host context. However, since preemption is
1063 * already disabled and to avoid making assumptions about host specific code in
1064 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1065 * disabled.
1066 */
1067 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1068 { /* likely */ }
1069 else
1070 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1071
1072 ASMSetFlags(fEFlags);
1073
1074 /*
1075 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1076 * flag that we need to update the host MSR values there. Even if we decide in the
1077 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1078 * if its content differs, we would have to update the host MSRs anyway.
1079 */
1080 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1081 }
1082 else
1083 ASMSetFlags(fEFlags);
1084 return rc;
1085}
1086
1087#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1088#ifdef VBOX_STRICT
1089
1090/**
1091 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1092 * transient structure.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure.
1095 * @param pVmxTransient The VMX-transient structure.
1096 */
1097DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1098{
1099 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1100 AssertRC(rc);
1101}
1102
1103
1104/**
1105 * Reads the VM-entry exception error code field from the VMCS into
1106 * the VMX transient structure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param pVmxTransient The VMX-transient structure.
1110 */
1111DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1112{
1113 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1114 AssertRC(rc);
1115}
1116
1117
1118/**
1119 * Reads the VM-entry instruction length field from the VMCS into
1120 * the VMX transient structure.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pVmxTransient The VMX-transient structure.
1124 */
1125DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1126{
1127 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1128 AssertRC(rc);
1129}
1130
1131#endif /* VBOX_STRICT */
1132
1133
1134/**
1135 * Reads VMCS fields into the VMXTRANSIENT structure, slow path version.
1136 *
1137 * Don't call directly unless it's likely that some or all of the fields
1138 * given in @a a_fReadMask have already been read.
1139 *
1140 * @tparam a_fReadMask The fields to read.
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param pVmxTransient The VMX-transient structure.
1143 */
1144template<uint32_t const a_fReadMask>
1145static void vmxHCReadToTransientSlow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1146{
1147 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1148 | HMVMX_READ_EXIT_INSTR_LEN
1149 | HMVMX_READ_EXIT_INSTR_INFO
1150 | HMVMX_READ_IDT_VECTORING_INFO
1151 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1152 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1153 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1154 | HMVMX_READ_GUEST_LINEAR_ADDR
1155 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1156 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1157 )) == 0);
1158
1159 if ((pVmxTransient->fVmcsFieldsRead & a_fReadMask) != a_fReadMask)
1160 {
1161 uint32_t const fVmcsFieldsRead = pVmxTransient->fVmcsFieldsRead;
1162
1163 if ( (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1164 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1165 {
1166 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1167 AssertRC(rc);
1168 }
1169 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1170 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1171 {
1172 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1173 AssertRC(rc);
1174 }
1175 if ( (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1176 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1177 {
1178 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1179 AssertRC(rc);
1180 }
1181 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1182 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1183 {
1184 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1185 AssertRC(rc);
1186 }
1187 if ( (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1188 && !(fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1189 {
1190 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1191 AssertRC(rc);
1192 }
1193 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1194 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1195 {
1196 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1197 AssertRC(rc);
1198 }
1199 if ( (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1200 && !(fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1201 {
1202 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1203 AssertRC(rc);
1204 }
1205 if ( (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1206 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1207 {
1208 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1209 AssertRC(rc);
1210 }
1211 if ( (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1212 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1213 {
1214 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1215 AssertRC(rc);
1216 }
1217 if ( (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1218 && !(fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1219 {
1220 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1221 AssertRC(rc);
1222 }
1223
1224 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1225 }
1226}
1227
1228
1229/**
1230 * Reads VMCS fields into the VMXTRANSIENT structure.
1231 *
1232 * This optimizes for the case where none of @a a_fReadMask has been read yet,
1233 * generating an optimized read sequence w/o any conditionals in
1234 * non-strict builds.
1235 *
1236 * @tparam a_fReadMask The fields to read. One or more of the
1237 * HMVMX_READ_XXX fields ORed together.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param pVmxTransient The VMX-transient structure.
1240 */
1241template<uint32_t const a_fReadMask>
1242DECLINLINE(void) vmxHCReadToTransient(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1243{
1244 AssertCompile((a_fReadMask & ~( HMVMX_READ_EXIT_QUALIFICATION
1245 | HMVMX_READ_EXIT_INSTR_LEN
1246 | HMVMX_READ_EXIT_INSTR_INFO
1247 | HMVMX_READ_IDT_VECTORING_INFO
1248 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1249 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1250 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1251 | HMVMX_READ_GUEST_LINEAR_ADDR
1252 | HMVMX_READ_GUEST_PHYSICAL_ADDR
1253 | HMVMX_READ_GUEST_PENDING_DBG_XCPTS
1254 )) == 0);
1255
1256 if (RT_LIKELY(!(pVmxTransient->fVmcsFieldsRead & a_fReadMask)))
1257 {
1258 if (a_fReadMask & HMVMX_READ_EXIT_QUALIFICATION)
1259 {
1260 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1261 AssertRC(rc);
1262 }
1263 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_LEN)
1264 {
1265 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1266 AssertRC(rc);
1267 }
1268 if (a_fReadMask & HMVMX_READ_EXIT_INSTR_INFO)
1269 {
1270 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1271 AssertRC(rc);
1272 }
1273 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_INFO)
1274 {
1275 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1276 AssertRC(rc);
1277 }
1278 if (a_fReadMask & HMVMX_READ_IDT_VECTORING_ERROR_CODE)
1279 {
1280 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1281 AssertRC(rc);
1282 }
1283 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_INFO)
1284 {
1285 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1286 AssertRC(rc);
1287 }
1288 if (a_fReadMask & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)
1289 {
1290 int const rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1291 AssertRC(rc);
1292 }
1293 if (a_fReadMask & HMVMX_READ_GUEST_LINEAR_ADDR)
1294 {
1295 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1296 AssertRC(rc);
1297 }
1298 if (a_fReadMask & HMVMX_READ_GUEST_PHYSICAL_ADDR)
1299 {
1300 int const rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1301 AssertRC(rc);
1302 }
1303 if (a_fReadMask & HMVMX_READ_GUEST_PENDING_DBG_XCPTS)
1304 {
1305 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1306 AssertRC(rc);
1307 }
1308
1309 pVmxTransient->fVmcsFieldsRead |= a_fReadMask;
1310 }
1311 else
1312 {
1313 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatReadToTransientFallback);
1314 Log11Func(("a_fReadMask=%#x fVmcsFieldsRead=%#x => %#x - Taking inefficient code path!\n",
1315 a_fReadMask, pVmxTransient->fVmcsFieldsRead, a_fReadMask & pVmxTransient->fVmcsFieldsRead));
1316 vmxHCReadToTransientSlow<a_fReadMask>(pVCpu, pVmxTransient);
1317 }
1318}
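// Typical invocation from a VM-exit handler, which is the "condensed VMREAD" pattern the
// HMVMX_WITH_CONDENSED_VMREADS define at the top of this file refers to (illustrative):
//
//     vmxHCReadToTransient<  HMVMX_READ_EXIT_QUALIFICATION
//                          | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
//     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);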
1319
1320
1321#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1322/**
1323 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1324 *
1325 * @param pVCpu The cross context virtual CPU structure.
1326 * @param pVmxTransient The VMX-transient structure.
1327 */
1328static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1329{
1330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1331 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1332 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1333 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1334 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1335 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1337 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1338 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1339 AssertRC(rc);
1340 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1341 | HMVMX_READ_EXIT_INSTR_LEN
1342 | HMVMX_READ_EXIT_INSTR_INFO
1343 | HMVMX_READ_IDT_VECTORING_INFO
1344 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1345 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1346 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1347 | HMVMX_READ_GUEST_LINEAR_ADDR
1348 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1349}
1350#endif
1351
1352/**
1353 * Verifies that our cached values of the VMCS fields are all consistent with
1354 * what's actually present in the VMCS.
1355 *
1356 * @returns VBox status code.
1357 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1358 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1359 * VMCS content. HMCPU error-field is
1360 * updated, see VMX_VCI_XXX.
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param pVmcsInfo The VMCS info. object.
1363 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1364 */
1365static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1366{
1367 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1368
1369 uint32_t u32Val;
1370 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1371 AssertRC(rc);
1372 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1373 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1374 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1375 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1376
1377 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1378 AssertRC(rc);
1379 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1380 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1381 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1382 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1383
1384 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1385 AssertRC(rc);
1386 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1387 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1388 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1389 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1390
1391 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1392 AssertRC(rc);
1393 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1394 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1395 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1396 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1397
1398 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1399 {
1400 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1401 AssertRC(rc);
1402 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1403 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1404 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1405 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1406 }
1407
1408 uint64_t u64Val;
1409 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1410 {
1411 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1412 AssertRC(rc);
1413 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1414 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1415 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1416 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1417 }
1418
1419 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1420 AssertRC(rc);
1421 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1422 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1423 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1424 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1425
1426 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1429 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432
1433 NOREF(pcszVmcs);
1434 return VINF_SUCCESS;
1435}
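/*
 * For illustration: each AssertMsgReturnStmt above condenses, roughly, the
 * following read-compare-record-return sequence (a simplification of the IPRT
 * macro, shown only to make the error reporting path explicit):
 *
 *     if (pVmcsInfo->u32EntryCtls != u32Val)                       // cached value is stale
 *     {
 *         VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY;  // record which field diverged
 *         return VERR_VMX_VMCS_FIELD_CACHE_INVALID;                // caller treats this as fatal
 *     }
 */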
1436
1437
1438/**
1439 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1440 * VMCS.
1441 *
1442 * This is typically required when the guest changes paging mode.
1443 *
1444 * @returns VBox status code.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pVmxTransient The VMX-transient structure.
1447 *
1448 * @remarks Requires EFER.
1449 * @remarks No-long-jump zone!!!
1450 */
1451static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1452{
1453 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1454 {
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1457
1458 /*
1459 * VM-entry controls.
1460 */
1461 {
1462 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1463 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1464
1465 /*
1466 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1467 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1468 *
1469 * For nested-guests, this is a mandatory VM-entry control. It's also
1470 * required because we do not want to leak host bits to the nested-guest.
1471 */
1472 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1473
1474 /*
1475 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1476 *
1477 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1478 * required to run the nested-guest using hardware-assisted VMX execution; it
1479 * depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1480 * can skip intercepting changes to the EFER MSR, which is why this must be done
1481 * here rather than while merging the guest VMCS controls.
1482 */
1483 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1484 {
1485 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1486 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1487 }
1488 else
1489 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1490
1491 /*
1492 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1493 *
1494 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1495 * regardless of whether the nested-guest VMCS specifies it because we are free to
1496 * load whatever MSRs we require and we do not need to modify the guest visible copy
1497 * of the VM-entry MSR load area.
1498 */
1499 if ( g_fHmVmxSupportsVmcsEfer
1500#ifndef IN_NEM_DARWIN
1501 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1502#endif
1503 )
1504 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1505 else
1506 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1507
1508 /*
1509 * The following should -not- be set (since we're not in SMM mode):
1510 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1511 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1512 */
1513
1514 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1515 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1516
1517 if ((fVal & fZap) == fVal)
1518 { /* likely */ }
1519 else
1520 {
1521 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1522 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1523 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1524 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1525 }
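/*
 * A worked example of the (fVal & fZap) == fVal capability check above, using
 * hypothetical MSR values purely for illustration:
 *
 *     allowed0 (seed of fVal) = 0x000011ff   -> bits that must be 1
 *     allowed1 (fZap)         = 0x0000ffff   -> bits that may be 1
 *     requested fVal          = 0x000091ff
 *
 *     fVal & fZap = 0x000091ff == fVal       -> every requested bit is permitted
 *
 * Had we requested a bit outside allowed1 (say bit 16), the masking would drop
 * it, the equality would fail and we would land in the error path above.
 */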
1526
1527 /* Commit it to the VMCS. */
1528 if (pVmcsInfo->u32EntryCtls != fVal)
1529 {
1530 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1531 AssertRC(rc);
1532 pVmcsInfo->u32EntryCtls = fVal;
1533 }
1534 }
1535
1536 /*
1537 * VM-exit controls.
1538 */
1539 {
1540 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1541 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1542
1543 /*
1544 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1545 * supported the 1-setting of this bit.
1546 *
1547 * For nested-guests, we set the "save debug controls" control since its converse,
1548 * "load debug controls", is mandatory for nested-guests anyway.
1549 */
1550 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1551
1552 /*
1553 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1554 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1555 * host EFER.LMA and EFER.LME bits to this value. See assertion in
1556 * vmxHCExportHostMsrs().
1557 *
1558 * For nested-guests, we always set this bit as we do not support 32-bit
1559 * hosts.
1560 */
1561 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1562
1563#ifndef IN_NEM_DARWIN
1564 /*
1565 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1566 *
1567 * For nested-guests, we should use the "save IA32_EFER" control if we also
1568 * used the "load IA32_EFER" control while exporting VM-entry controls.
1569 */
1570 if ( g_fHmVmxSupportsVmcsEfer
1571 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1572 {
1573 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1574 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1575 }
1576#endif
1577
1578 /*
1579 * Enable saving of the VMX-preemption timer value on VM-exit.
1580 * For nested-guests, currently not exposed/used.
1581 */
1582 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1583 * the timer value. */
1584 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1585 {
1586 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1587 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1588 }
1589
1590 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1591 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1592
1593 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1594 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1595 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1596
1597 if ((fVal & fZap) == fVal)
1598 { /* likely */ }
1599 else
1600 {
1601 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1602 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1603 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1604 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1605 }
1606
1607 /* Commit it to the VMCS. */
1608 if (pVmcsInfo->u32ExitCtls != fVal)
1609 {
1610 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1611 AssertRC(rc);
1612 pVmcsInfo->u32ExitCtls = fVal;
1613 }
1614 }
1615
1616 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1617 }
1618 return VINF_SUCCESS;
1619}
1620
1621
1622/**
1623 * Sets the TPR threshold in the VMCS.
1624 *
1625 * @param pVCpu The cross context virtual CPU structure.
1626 * @param pVmcsInfo The VMCS info. object.
1627 * @param u32TprThreshold The TPR threshold (task-priority class only).
1628 */
1629DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1630{
1631 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1632 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1633 RT_NOREF(pVmcsInfo);
1634 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1635 AssertRC(rc);
1636}
1637
1638
1639/**
1640 * Exports the guest APIC TPR state into the VMCS.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param pVmxTransient The VMX-transient structure.
1644 *
1645 * @remarks No-long-jump zone!!!
1646 */
1647static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1648{
1649 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1650 {
1651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1652
1653 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1654 if (!pVmxTransient->fIsNestedGuest)
1655 {
1656 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1657 && APICIsEnabled(pVCpu))
1658 {
1659 /*
1660 * Setup TPR shadowing.
1661 */
1662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1663 {
1664 bool fPendingIntr = false;
1665 uint8_t u8Tpr = 0;
1666 uint8_t u8PendingIntr = 0;
1667 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1668 AssertRC(rc);
1669
1670 /*
1671 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1672 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1673 * priority of the pending interrupt so we can deliver the interrupt. If there
1674 * are no interrupts pending, set threshold to 0 to not cause any
1675 * TPR-below-threshold VM-exits.
1676 */
1677 uint32_t u32TprThreshold = 0;
1678 if (fPendingIntr)
1679 {
1680 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1681 (which is the Task-Priority Class). */
1682 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1683 const uint8_t u8TprPriority = u8Tpr >> 4;
1684 if (u8PendingPriority <= u8TprPriority)
1685 u32TprThreshold = u8PendingPriority;
1686 }
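/*
 * Worked example with hypothetical values, for illustration only:
 *     u8Tpr = 0x50, u8PendingIntr = 0x41
 *     -> u8TprPriority = 5, u8PendingPriority = 4
 *     -> the pending interrupt is masked (4 <= 5), so u32TprThreshold = 4.
 * The guest then causes a TPR-below-threshold VM-exit as soon as it lowers its
 * TPR class below 4, at which point the pending interrupt can be delivered.
 */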
1687
1688 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1689 }
1690 }
1691 }
1692 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1693 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1694 }
1695}
1696
1697
1698/**
1699 * Gets the guest interruptibility-state and updates related force-flags.
1700 *
1701 * @returns Guest's interruptibility-state.
1702 * @param pVCpu The cross context virtual CPU structure.
1703 *
1704 * @remarks No-long-jump zone!!!
1705 */
1706static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1707{
1708 uint32_t fIntrState;
1709
1710 /*
1711 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1712 */
1713 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1714 fIntrState = 0;
1715 else
1716 {
1717 /* If inhibition is active, RIP should've been imported from the VMCS already. */
1718 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1719
1720 if (CPUMIsInInterruptShadowAfterSs(&pVCpu->cpum.GstCtx))
1721 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1722 else
1723 {
1724 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1725
1726 /* Block-by-STI must not be set when interrupts are disabled. */
1727 AssertStmt(pVCpu->cpum.GstCtx.eflags.Bits.u1IF, fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1728 }
1729 }
1730
1731 /*
1732 * Check if we should inhibit NMI delivery.
1733 */
1734 if (!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1735 { /* likely */ }
1736 else
1737 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1738
1739 /*
1740 * Validate.
1741 */
1742 /* We don't support block-by-SMI yet. */
1743 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1744
1745 return fIntrState;
1746}
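/*
 * For illustration, the value returned above typically ends up in the guest
 * interruptibility-state VMCS field when the event injection code exports it,
 * roughly like this (simplified, with error handling reduced to an assertion):
 *
 *     uint32_t const fIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
 *     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
 *     AssertRC(rc);
 */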
1747
1748
1749/**
1750 * Exports the exception intercepts required for guest execution in the VMCS.
1751 *
1752 * @param pVCpu The cross context virtual CPU structure.
1753 * @param pVmxTransient The VMX-transient structure.
1754 *
1755 * @remarks No-long-jump zone!!!
1756 */
1757static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1758{
1759 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1760 {
1761 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1762 if ( !pVmxTransient->fIsNestedGuest
1763 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1764 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1765 else
1766 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1767
1768 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1769 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1770 }
1771}
1772
1773
1774/**
1775 * Exports the guest's RIP into the guest-state area in the VMCS.
1776 *
1777 * @param pVCpu The cross context virtual CPU structure.
1778 *
1779 * @remarks No-long-jump zone!!!
1780 */
1781static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1782{
1783 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1784 {
1785 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1786
1787 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1788 AssertRC(rc);
1789
1790 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1791 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1792 }
1793}
1794
1795
1796/**
1797 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1798 *
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pVmxTransient The VMX-transient structure.
1801 *
1802 * @remarks No-long-jump zone!!!
1803 */
1804static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1805{
1806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1807 {
1808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1809
1810 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits
1811 of RFLAGS are reserved (MBZ). We use bits 63:24 for internal purposes, so no need
1812 to assert this, the CPUMX86EFLAGS/CPUMX86RFLAGS union masks these off for us.
1813 Use 32-bit VMWRITE. */
1814 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
1815 Assert((fEFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
1816 AssertMsg(!(fEFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), ("%#x\n", fEFlags));
1817
1818#ifndef IN_NEM_DARWIN
1819 /*
1820 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1821 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1822 * can run the real-mode guest code under Virtual 8086 mode.
1823 */
1824 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1825 if (pVmcsInfo->RealMode.fRealOnV86Active)
1826 {
1827 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1828 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1829 Assert(!pVmxTransient->fIsNestedGuest);
1830 pVmcsInfo->RealMode.Eflags.u32 = fEFlags; /* Save the original eflags of the real-mode guest. */
1831 fEFlags |= X86_EFL_VM; /* Set the Virtual 8086 mode bit. */
1832 fEFlags &= ~X86_EFL_IOPL; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1833 }
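/*
 * Example of the transformation above with a hypothetical real-mode eflags
 * value of 0x00000202 (IF set, IOPL already 0):
 *     RealMode.Eflags.u32 = 0x00000202 (saved for the VM-exit path)
 *     fEFlags |= X86_EFL_VM -> 0x00020202
 * so VT-x executes the code in virtual-8086 mode while the unmodified value
 * can be given back to the guest context on VM-exit.
 */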
1834#else
1835 RT_NOREF(pVmxTransient);
1836#endif
1837
1838 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags);
1839 AssertRC(rc);
1840
1841 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1842 Log4Func(("eflags=%#RX32\n", fEFlags));
1843 }
1844}
1845
1846
1847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848/**
1849 * Copies the nested-guest VMCS to the shadow VMCS.
1850 *
1851 * @returns VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure.
1853 * @param pVmcsInfo The VMCS info. object.
1854 *
1855 * @remarks No-long-jump zone!!!
1856 */
1857static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1858{
1859 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1860 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1861
1862 /*
1863 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1864 * current VMCS, as we may try saving guest lazy MSRs.
1865 *
1866 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1867 * calling the VMCS import code, which currently performs the guest MSR reads
1868 * (on 64-bit hosts), accesses the auto-load/store MSR area (on 32-bit hosts),
1869 * and drives the rest of the VMX session-leave machinery.
1870 */
1871 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1872
1873 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1874 if (RT_SUCCESS(rc))
1875 {
1876 /*
1877 * Copy all guest read/write VMCS fields.
1878 *
1879 * We don't check for VMWRITE failures here for performance reasons and
1880 * because they are not expected to fail, barring irrecoverable conditions
1881 * like hardware errors.
1882 */
1883 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1884 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1885 {
1886 uint64_t u64Val;
1887 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1888 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1889 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1890 }
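/*
 * For instance, when the current shadow field is the guest CS selector, the
 * IEMReadVmxVmcsField() call above pulls the nested-guest's cached selector
 * value and the VMWRITE places it in the shadow VMCS, so a later guest VMREAD
 * of that field is satisfied without causing a VM-exit.
 */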
1891
1892 /*
1893 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1894 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1895 */
1896 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1897 {
1898 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1899 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1900 {
1901 uint64_t u64Val;
1902 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1903 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1904 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1905 }
1906 }
1907
1908 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1909 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1910 }
1911
1912 ASMSetFlags(fEFlags);
1913 return rc;
1914}
1915
1916
1917/**
1918 * Copies the shadow VMCS to the nested-guest VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVCpu The cross context virtual CPU structure.
1922 * @param pVmcsInfo The VMCS info. object.
1923 *
1924 * @remarks Called with interrupts disabled.
1925 */
1926static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1927{
1928 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1929 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1930 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1931
1932 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1933 if (RT_SUCCESS(rc))
1934 {
1935 /*
1936 * Copy guest read/write fields from the shadow VMCS.
1937 * Guest read-only fields cannot be modified, so no need to copy them.
1938 *
1939 * We don't check for VMREAD failures here for performance reasons and
1940 * because they are not expected to fail, barring irrecoverable conditions
1941 * like hardware errors.
1942 */
1943 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1944 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1945 {
1946 uint64_t u64Val;
1947 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1948 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1949 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1950 }
1951
1952 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1953 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1954 }
1955 return rc;
1956}
1957
1958
1959/**
1960 * Enables VMCS shadowing for the given VMCS info. object.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 * @param pVmcsInfo The VMCS info. object.
1964 *
1965 * @remarks No-long-jump zone!!!
1966 */
1967static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1968{
1969 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
1970 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
1971 {
1972 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1973 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
1974 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
1975 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
1976 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
1977 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
1978 Log4Func(("Enabled\n"));
1979 }
1980}
1981
1982
1983/**
1984 * Disables VMCS shadowing for the given VMCS info. object.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure.
1987 * @param pVmcsInfo The VMCS info. object.
1988 *
1989 * @remarks No-long-jump zone!!!
1990 */
1991static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1992{
1993 /*
1994 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
1995 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
1996 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
1997 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
1998 *
1999 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2000 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2001 */
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2004 {
2005 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2006 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2007 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2008 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2009 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2010 Log4Func(("Disabled\n"));
2011 }
2012}
2013#endif
2014
2015
2016/**
2017 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2018 *
2019 * The guest FPU state is always pre-loaded, hence we don't need to bother about
2020 * sharing FPU-related CR0 bits between the guest and host.
2021 *
2022 * @returns VBox status code.
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmxTransient The VMX-transient structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2029{
2030 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2031 {
2032 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2034
2035 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2036 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2037 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2038 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2039 else
2040 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
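/*
 * Illustration with typical (but not architecturally guaranteed) fixed-bit MSR values:
 *     u64Cr0Fixed0 = 0x80000021 (PG, NE and PE must be 1)
 *     u64Cr0Fixed1 = 0xffffffff (no bit is forced to 0)
 * With unrestricted guest execution, PE and PG are dropped from the must-be-one
 * set above, leaving only NE (0x20) to be ORed into the guest CR0 further down.
 */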
2041
2042 if (!pVmxTransient->fIsNestedGuest)
2043 {
2044 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2045 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2046 uint64_t const u64ShadowCr0 = u64GuestCr0;
2047 Assert(!RT_HI_U32(u64GuestCr0));
2048
2049 /*
2050 * Setup VT-x's view of the guest CR0.
2051 */
2052 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2053 if (VM_IS_VMX_NESTED_PAGING(pVM))
2054 {
2055#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2056 if (CPUMIsGuestPagingEnabled(pVCpu))
2057 {
2058 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2059 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2060 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2061 }
2062 else
2063 {
2064 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2065 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2066 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2067 }
2068
2069 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2072#endif
2073 }
2074 else
2075 {
2076 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2077 u64GuestCr0 |= X86_CR0_WP;
2078 }
2079
2080 /*
2081 * Guest FPU bits.
2082 *
2083 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2084 * using CR0.TS.
2085 *
2086 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always
2087 * be set on the first CPUs to support VT-x, with no mention of any relaxation for UX (unrestricted guest) in the VM-entry checks.
2088 */
2089 u64GuestCr0 |= X86_CR0_NE;
2090
2091 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2092 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2093
2094 /*
2095 * Update exception intercepts.
2096 */
2097 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2098#ifndef IN_NEM_DARWIN
2099 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2100 {
2101 Assert(PDMVmmDevHeapIsEnabled(pVM));
2102 Assert(pVM->hm.s.vmx.pRealModeTSS);
2103 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2104 }
2105 else
2106#endif
2107 {
2108 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2109 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2110 if (fInterceptMF)
2111 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2112 }
2113
2114 /* Additional intercepts for debugging, define these yourself explicitly. */
2115#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2116 uXcptBitmap |= 0
2117 | RT_BIT(X86_XCPT_BP)
2118 | RT_BIT(X86_XCPT_DE)
2119 | RT_BIT(X86_XCPT_NM)
2120 | RT_BIT(X86_XCPT_TS)
2121 | RT_BIT(X86_XCPT_UD)
2122 | RT_BIT(X86_XCPT_NP)
2123 | RT_BIT(X86_XCPT_SS)
2124 | RT_BIT(X86_XCPT_GP)
2125 | RT_BIT(X86_XCPT_PF)
2126 | RT_BIT(X86_XCPT_MF)
2127 ;
2128#elif defined(HMVMX_ALWAYS_TRAP_PF)
2129 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2130#endif
2131 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2132 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2133 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
2134 uXcptBitmap |= RT_BIT(X86_XCPT_DE);
2135 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2136
2137 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2138 u64GuestCr0 |= fSetCr0;
2139 u64GuestCr0 &= fZapCr0;
2140 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2141
2142 Assert(!RT_HI_U32(u64GuestCr0));
2143 Assert(u64GuestCr0 & X86_CR0_NE);
2144
2145 /* Commit the CR0 and related fields to the guest VMCS. */
2146 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2147 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2148 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2149 {
2150 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2151 AssertRC(rc);
2152 }
2153 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2154 {
2155 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2156 AssertRC(rc);
2157 }
2158
2159 /* Update our caches. */
2160 pVmcsInfo->u32ProcCtls = uProcCtls;
2161 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2162
2163 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2164 }
2165 else
2166 {
2167 /*
2168 * With nested-guests, we may have extended the guest/host mask here since we
2169 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2170 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2171 * originally supplied. We must copy those bits from the nested-guest CR0 into
2172 * the nested-guest CR0 read-shadow.
2173 */
2174 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2175 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2176 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2177
2178 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2179 u64GuestCr0 |= fSetCr0;
2180 u64GuestCr0 &= fZapCr0;
2181 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2182
2183 Assert(!RT_HI_U32(u64GuestCr0));
2184 Assert(u64GuestCr0 & X86_CR0_NE);
2185
2186 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2187 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2188 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2189
2190 Log4Func(("cr0=%#RX64 shadow=%#RX64 vmcs_read_shw=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0,
2191 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u, fSetCr0, fZapCr0));
2192 }
2193
2194 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2195 }
2196
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Exports the guest control registers (CR3, CR4) into the guest-state area
2203 * in the VMCS.
2204 *
2205 * @returns VBox strict status code.
2206 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2207 * without unrestricted guest access and the VMMDev is not presently
2208 * mapped (e.g. EFI32).
2209 *
2210 * @param pVCpu The cross context virtual CPU structure.
2211 * @param pVmxTransient The VMX-transient structure.
2212 *
2213 * @remarks No-long-jump zone!!!
2214 */
2215static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2216{
2217 int rc = VINF_SUCCESS;
2218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2219
2220 /*
2221 * Guest CR2.
2222 * It's always loaded in the assembler code. Nothing to do here.
2223 */
2224
2225 /*
2226 * Guest CR3.
2227 */
2228 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2229 {
2230 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2231
2232 if (VM_IS_VMX_NESTED_PAGING(pVM))
2233 {
2234#ifndef IN_NEM_DARWIN
2235 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2236 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2237
2238 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2239 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2240 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2241 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2242
2243 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2244 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2245 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
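/*
 * For example, with a hypothetical 4K-aligned EPT PML4 address of 0x123456000:
 *     memtype WB (6) in bits 2:0, page-walk length 4 encoded as 3 in bits 5:3
 *     -> 0x123456000 | 0x6 | (3 << 3) = 0x12345601e
 * The page-walk length encoding (bits 5:3 == 3) is what the first assert below checks.
 */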
2246
2247 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2248 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2249 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2250 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2251 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2252 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2253 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2254
2255 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2256 AssertRC(rc);
2257#endif
2258
2259 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2260 uint64_t u64GuestCr3 = pCtx->cr3;
2261 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2262 || CPUMIsGuestPagingEnabledEx(pCtx))
2263 {
2264 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2265 if (CPUMIsGuestInPAEModeEx(pCtx))
2266 {
2267 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2268 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2269 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2270 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2271 }
2272
2273 /*
2274 * With nested paging, the guest's view of its CR3 is left unblemished when the
2275 * guest is using paging, or when we have unrestricted guest execution to handle
2276 * the guest while it's not using paging.
2277 */
2278 }
2279#ifndef IN_NEM_DARWIN
2280 else
2281 {
2282 /*
2283 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2284 * thinks it accesses physical memory directly, we use our identity-mapped
2285 * page table to map guest-linear to guest-physical addresses. EPT takes care
2286 * of translating it to host-physical addresses.
2287 */
2288 RTGCPHYS GCPhys;
2289 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2290
2291 /* We obtain it here every time as the guest could have relocated this PCI region. */
2292 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2293 if (RT_SUCCESS(rc))
2294 { /* likely */ }
2295 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2296 {
2297 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2298 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2299 }
2300 else
2301 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2302
2303 u64GuestCr3 = GCPhys;
2304 }
2305#endif
2306
2307 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2308 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2309 AssertRC(rc);
2310 }
2311 else
2312 {
2313 Assert(!pVmxTransient->fIsNestedGuest);
2314 /* Non-nested paging case, just use the hypervisor's CR3. */
2315 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2316
2317 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2318 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2319 AssertRC(rc);
2320 }
2321
2322 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2323 }
2324
2325 /*
2326 * Guest CR4.
2327 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2328 */
2329 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2330 {
2331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2332 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2333
2334 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2335 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2336
2337 /*
2338 * With nested-guests, we may have extended the guest/host mask here (since we
2339 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
2340 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2341 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2342 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2343 */
2344 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2345 uint64_t u64GuestCr4 = pCtx->cr4;
2346 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2347 ? pCtx->cr4
2348 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2349 Assert(!RT_HI_U32(u64GuestCr4));
2350
2351#ifndef IN_NEM_DARWIN
2352 /*
2353 * Setup VT-x's view of the guest CR4.
2354 *
2355 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2356 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2357 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2358 *
2359 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2360 */
2361 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2362 {
2363 Assert(pVM->hm.s.vmx.pRealModeTSS);
2364 Assert(PDMVmmDevHeapIsEnabled(pVM));
2365 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2366 }
2367#endif
2368
2369 if (VM_IS_VMX_NESTED_PAGING(pVM))
2370 {
2371 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2372 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2373 {
2374 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2375 u64GuestCr4 |= X86_CR4_PSE;
2376 /* Our identity mapping is a 32-bit page directory. */
2377 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2378 }
2379 /* else use guest CR4. */
2380 }
2381 else
2382 {
2383 Assert(!pVmxTransient->fIsNestedGuest);
2384
2385 /*
2386 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2387 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2388 */
2389 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2390 {
2391 case PGMMODE_REAL: /* Real-mode. */
2392 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2393 case PGMMODE_32_BIT: /* 32-bit paging. */
2394 {
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2396 break;
2397 }
2398
2399 case PGMMODE_PAE: /* PAE paging. */
2400 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2401 {
2402 u64GuestCr4 |= X86_CR4_PAE;
2403 break;
2404 }
2405
2406 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2407 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2408 {
2409#ifdef VBOX_WITH_64_BITS_GUESTS
2410 /* For our assumption in vmxHCShouldSwapEferMsr. */
2411 Assert(u64GuestCr4 & X86_CR4_PAE);
2412 break;
2413#endif
2414 }
2415 default:
2416 AssertFailed();
2417 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2418 }
2419 }
2420
2421 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2422 u64GuestCr4 |= fSetCr4;
2423 u64GuestCr4 &= fZapCr4;
2424
2425 Assert(!RT_HI_U32(u64GuestCr4));
2426 Assert(u64GuestCr4 & X86_CR4_VMXE);
2427
2428 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2429 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2430 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2431
2432#ifndef IN_NEM_DARWIN
2433 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2434 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2435 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2436 {
2437 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2438 hmR0VmxUpdateStartVmFunction(pVCpu);
2439 }
2440#endif
2441
2442 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2443
2444 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2445 }
2446 return rc;
2447}
2448
2449
2450#ifdef VBOX_STRICT
2451/**
2452 * Strict function to validate segment registers.
2453 *
2454 * @param pVCpu The cross context virtual CPU structure.
2455 * @param pVmcsInfo The VMCS info. object.
2456 *
2457 * @remarks Will import guest CR0 on strict builds during validation of
2458 * segments.
2459 */
2460static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2461{
2462 /*
2463 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2464 *
2465 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2466 * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2467 * unusable bit and doesn't change the guest-context value.
2468 */
2469 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2470 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2471 vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2472 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2473 && ( !CPUMIsGuestInRealModeEx(pCtx)
2474 && !CPUMIsGuestInV86ModeEx(pCtx)))
2475 {
2476 /* Protected mode checks */
2477 /* CS */
2478 Assert(pCtx->cs.Attr.n.u1Present);
2479 Assert(!(pCtx->cs.Attr.u & 0xf00));
2480 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2481 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2482 || !(pCtx->cs.Attr.n.u1Granularity));
2483 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2484 || (pCtx->cs.Attr.n.u1Granularity));
2485 /* CS cannot be loaded with NULL in protected mode. */
2486 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2487 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2488 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2489 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2490 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2491 else
2492 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u2Dpl));
2493 /* SS */
2494 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2495 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2496 if ( !(pCtx->cr0 & X86_CR0_PE)
2497 || pCtx->cs.Attr.n.u4Type == 3)
2498 {
2499 Assert(!pCtx->ss.Attr.n.u2Dpl);
2500 }
2501 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2502 {
2503 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2504 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2505 Assert(pCtx->ss.Attr.n.u1Present);
2506 Assert(!(pCtx->ss.Attr.u & 0xf00));
2507 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->ss.Attr.n.u1Granularity));
2510 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2511 || (pCtx->ss.Attr.n.u1Granularity));
2512 }
2513 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2514 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2515 {
2516 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2517 Assert(pCtx->ds.Attr.n.u1Present);
2518 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2519 Assert(!(pCtx->ds.Attr.u & 0xf00));
2520 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2521 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2522 || !(pCtx->ds.Attr.n.u1Granularity));
2523 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2524 || (pCtx->ds.Attr.n.u1Granularity));
2525 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2527 }
2528 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2531 Assert(pCtx->es.Attr.n.u1Present);
2532 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2533 Assert(!(pCtx->es.Attr.u & 0xf00));
2534 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->es.Attr.n.u1Granularity));
2537 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2538 || (pCtx->es.Attr.n.u1Granularity));
2539 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2540 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2541 }
2542 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2543 {
2544 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2545 Assert(pCtx->fs.Attr.n.u1Present);
2546 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2547 Assert(!(pCtx->fs.Attr.u & 0xf00));
2548 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2549 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2550 || !(pCtx->fs.Attr.n.u1Granularity));
2551 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2552 || (pCtx->fs.Attr.n.u1Granularity));
2553 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2554 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2555 }
2556 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2557 {
2558 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2559 Assert(pCtx->gs.Attr.n.u1Present);
2560 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2561 Assert(!(pCtx->gs.Attr.u & 0xf00));
2562 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2563 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2564 || !(pCtx->gs.Attr.n.u1Granularity));
2565 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2566 || (pCtx->gs.Attr.n.u1Granularity));
2567 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2568 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2569 }
2570 /* 64-bit capable CPUs. */
2571 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2572 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2573 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2574 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2575 }
2576 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2577 || ( CPUMIsGuestInRealModeEx(pCtx)
2578 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2579 {
2580 /* Real and v86 mode checks. */
2581 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
2582 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2583#ifndef IN_NEM_DARWIN
2584 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2585 {
2586 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2587 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2588 }
2589 else
2590#endif
2591 {
2592 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2593 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2594 }
2595
2596 /* CS */
2597 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2598 Assert(pCtx->cs.u32Limit == 0xffff);
2599 AssertMsg(u32CSAttr == 0xf3, ("cs=%#x %#x ", pCtx->cs.Sel, u32CSAttr));
2600 /* SS */
2601 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2602 Assert(pCtx->ss.u32Limit == 0xffff);
2603 Assert(u32SSAttr == 0xf3);
2604 /* DS */
2605 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2606 Assert(pCtx->ds.u32Limit == 0xffff);
2607 Assert(u32DSAttr == 0xf3);
2608 /* ES */
2609 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2610 Assert(pCtx->es.u32Limit == 0xffff);
2611 Assert(u32ESAttr == 0xf3);
2612 /* FS */
2613 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2614 Assert(pCtx->fs.u32Limit == 0xffff);
2615 Assert(u32FSAttr == 0xf3);
2616 /* GS */
2617 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2618 Assert(pCtx->gs.u32Limit == 0xffff);
2619 Assert(u32GSAttr == 0xf3);
2620 /* 64-bit capable CPUs. */
2621 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2622 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2623 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2624 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2625 }
2626}
2627#endif /* VBOX_STRICT */
2628
2629
2630/**
2631 * Exports a guest segment register into the guest-state area in the VMCS.
2632 *
2633 * @returns VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure.
2635 * @param pVmcsInfo The VMCS info. object.
2636 * @param iSegReg The segment register number (X86_SREG_XXX).
2637 * @param pSelReg Pointer to the segment selector.
2638 *
2639 * @remarks No-long-jump zone!!!
2640 */
2641static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2642{
2643 Assert(iSegReg < X86_SREG_COUNT);
2644
2645 uint32_t u32Access = pSelReg->Attr.u;
2646#ifndef IN_NEM_DARWIN
2647 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2648#endif
2649 {
2650 /*
2651 * The way to tell whether this is really a null selector or just a selector
2652 * loaded with 0 in real-mode is by using the segment attributes. A selector
2653 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2654 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2655 * that NULL selectors loaded in protected-mode have their attributes as 0.
2656 */
2657 if (u32Access)
2658 { }
2659 else
2660 u32Access = X86DESCATTR_UNUSABLE;
2661 }
2662#ifndef IN_NEM_DARWIN
2663 else
2664 {
2665 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2666 u32Access = 0xf3;
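/*
 * 0xf3 decodes (in the VMCS access-rights layout) to: type 3 (read/write data,
 * accessed), S=1, DPL=3, present -- i.e. an ordinary ring-3 data segment, which
 * is what the virtual-8086 guest-state checks expect for all selectors.
 */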
2667 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2668 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2669 RT_NOREF_PV(pVCpu);
2670 }
2671#else
2672 RT_NOREF(pVmcsInfo);
2673#endif
2674
2675 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2676 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2677 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg, pSelReg->Attr.u));
2678
2679 /*
2680 * Commit it to the VMCS.
2681 */
2682 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2683 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2684 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2685 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2686 return VINF_SUCCESS;
2687}
2688
2689
2690/**
2691 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2692 * area in the VMCS.
2693 *
2694 * @returns VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure.
2696 * @param pVmxTransient The VMX-transient structure.
2697 *
2698 * @remarks Will import guest CR0 on strict builds during validation of
2699 * segments.
2700 * @remarks No-long-jump zone!!!
2701 */
2702static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2703{
2704 int rc = VERR_INTERNAL_ERROR_5;
2705#ifndef IN_NEM_DARWIN
2706 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2707#endif
2708 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2709 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2710#ifndef IN_NEM_DARWIN
2711 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2712#endif
2713
2714 /*
2715 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2716 */
2717 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2718 {
2719 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2720 {
2721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2722#ifndef IN_NEM_DARWIN
2723 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2724 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2725#endif
2726 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2727 AssertRC(rc);
2728 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2729 }
2730
2731 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2732 {
2733 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2734#ifndef IN_NEM_DARWIN
2735 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2736 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2737#endif
2738 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2739 AssertRC(rc);
2740 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2741 }
2742
2743 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2744 {
2745 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2746#ifndef IN_NEM_DARWIN
2747 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2748 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2749#endif
2750 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2751 AssertRC(rc);
2752 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2753 }
2754
2755 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2756 {
2757 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2758#ifndef IN_NEM_DARWIN
2759 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2760 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2761#endif
2762 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2763 AssertRC(rc);
2764 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2765 }
2766
2767 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2768 {
2769 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2770#ifndef IN_NEM_DARWIN
2771 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2772 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2773#endif
2774 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2775 AssertRC(rc);
2776 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2777 }
2778
2779 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2780 {
2781 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2782#ifndef IN_NEM_DARWIN
2783 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2784 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2785#endif
2786 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2787 AssertRC(rc);
2788 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2789 }
2790
2791#ifdef VBOX_STRICT
2792 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2793#endif
2794 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2795 pCtx->cs.Attr.u));
2796 }
2797
2798 /*
2799 * Guest TR.
2800 */
2801 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2802 {
2803 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2804
2805 /*
2806 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2807 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2808 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2809 */
2810 uint16_t u16Sel;
2811 uint32_t u32Limit;
2812 uint64_t u64Base;
2813 uint32_t u32AccessRights;
2814#ifndef IN_NEM_DARWIN
2815 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2816#endif
2817 {
2818 u16Sel = pCtx->tr.Sel;
2819 u32Limit = pCtx->tr.u32Limit;
2820 u64Base = pCtx->tr.u64Base;
2821 u32AccessRights = pCtx->tr.Attr.u;
2822 }
2823#ifndef IN_NEM_DARWIN
2824 else
2825 {
2826 Assert(!pVmxTransient->fIsNestedGuest);
2827 Assert(pVM->hm.s.vmx.pRealModeTSS);
2828 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2829
2830 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2831 RTGCPHYS GCPhys;
2832 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2833 AssertRCReturn(rc, rc);
2834
2835 X86DESCATTR DescAttr;
2836 DescAttr.u = 0;
2837 DescAttr.n.u1Present = 1;
2838 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2839
2840 u16Sel = 0;
2841 u32Limit = HM_VTX_TSS_SIZE;
2842 u64Base = GCPhys;
2843 u32AccessRights = DescAttr.u;
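/*
 * With only the type and present bits set, DescAttr.u works out to 0x8b
 * (busy 32-bit TSS, DPL 0, present), which satisfies the TR validation below.
 */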
2844 }
2845#endif
2846
2847 /* Validate. */
2848 Assert(!(u16Sel & RT_BIT(2)));
2849 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2850 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2851 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2852 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2853 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2854 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2855 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2856 Assert( (u32Limit & 0xfff) == 0xfff
2857 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2858 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2859 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2860
2861 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2862 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2863 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2864 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2865
2866 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2867 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2868 }
2869
2870 /*
2871 * Guest GDTR.
2872 */
2873 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2874 {
2875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2876
2877 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2878 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2879
2880 /* Validate. */
2881 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2882
2883 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2884 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2885 }
2886
2887 /*
2888 * Guest LDTR.
2889 */
2890 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2891 {
2892 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2893
2894 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
2895 uint32_t u32Access;
2896 if ( !pVmxTransient->fIsNestedGuest
2897 && !pCtx->ldtr.Attr.u)
2898 u32Access = X86DESCATTR_UNUSABLE;
2899 else
2900 u32Access = pCtx->ldtr.Attr.u;
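/*
 * Example: a guest that never uses an LDT leaves LDTR as a null selector with
 * attribute 0; for the non-nested case the code above then supplies
 * X86DESCATTR_UNUSABLE (bit 16) so the VM-entry checks accept the otherwise
 * empty LDTR state.
 */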
2901
2902 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2903 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2906
2907 /* Validate. */
2908 if (!(u32Access & X86DESCATTR_UNUSABLE))
2909 {
2910 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2911 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2912 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2913 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2914 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2915 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2916 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2917 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2918 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2919 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2920 }
2921
2922 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2923 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2924 }
2925
2926 /*
2927 * Guest IDTR.
2928 */
2929 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2930 {
2931 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2932
2933 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2935
2936 /* Validate. */
2937 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2938
2939 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2940 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2941 }
2942
2943 return VINF_SUCCESS;
2944}
2945
2946
2947/**
2948 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2949 * VM-exit interruption info type.
2950 *
2951 * @returns The IEM exception flags.
2952 * @param uVector The event vector.
2953 * @param uVmxEventType The VMX event type.
2954 *
2955 * @remarks This function currently only constructs flags required for
2956 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2957 * and CR2 aspects of an exception are not included).
2958 */
2959static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2960{
2961 uint32_t fIemXcptFlags;
2962 switch (uVmxEventType)
2963 {
2964 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2965 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2966 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2967 break;
2968
2969 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2970 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2971 break;
2972
2973 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
2974 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
2975 break;
2976
2977 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
2978 {
2979 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2980 if (uVector == X86_XCPT_BP)
2981 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
2982 else if (uVector == X86_XCPT_OF)
2983 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
2984 else
2985 {
2986 fIemXcptFlags = 0;
2987 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
2988 }
2989 break;
2990 }
2991
2992 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
2994 break;
2995
2996 default:
2997 fIemXcptFlags = 0;
2998 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
2999 break;
3000 }
3001 return fIemXcptFlags;
3002}
3003
3004
3005/**
3006 * Sets an event as a pending event to be injected into the guest.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param u32IntInfo The VM-entry interruption-information field.
3010 * @param cbInstr The VM-entry instruction length in bytes (for
3011 * software interrupts, exceptions and privileged
3012 * software exceptions).
3013 * @param u32ErrCode The VM-entry exception error code.
3014 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3015 * page-fault.
3016 */
3017DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3018 RTGCUINTPTR GCPtrFaultAddress)
3019{
3020 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3021 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3022 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3023 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3024 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3025 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3026}
3027
3028
3029/**
3030 * Sets an external interrupt as pending-for-injection into the VM.
3031 *
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param u8Interrupt The external interrupt vector.
3034 */
3035DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3036{
3037 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3038 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3039 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3040 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3041 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3042}
3043
3044
3045/**
3046 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3047 *
3048 * @param pVCpu The cross context virtual CPU structure.
3049 */
3050DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3051{
3052 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3053 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3054 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3055 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3056 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3057}
3058
3059
3060/**
3061 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 */
3065DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3066{
3067 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3068 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3071 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3072}
3073
3074
3075/**
3076 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3077 *
3078 * @param pVCpu The cross context virtual CPU structure.
3079 */
3080DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3081{
3082 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3083 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3086 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3087}
3088
3089
3090/**
3091 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3092 *
3093 * @param pVCpu The cross context virtual CPU structure.
3094 */
3095DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3096{
3097 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3098 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3101 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3102}
3103
3104
3105#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3106/**
3107 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 * @param u32ErrCode The error code for the general-protection exception.
3111 */
3112DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3113{
3114 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3117 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3118 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3119}
3120
3121
3122/**
3123 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3124 *
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param u32ErrCode The error code for the stack exception.
3127 */
3128DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3129{
3130 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3132 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3133 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3134 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3135}
3136#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
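
/*
 * Illustrative sketch (not part of this file's build): the vmxHCSetPendingXcpt*
 * helpers above all assemble the 32-bit VM-entry interruption-information field
 * with RT_BF_MAKE.  The stand-alone program below, kept under "#if 0", does the
 * same packing with plain shifts; the bit positions restate the assumed field
 * layout (vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11,
 * valid in bit 31) and the helper name is made up.  Compile it separately to
 * experiment.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

/* Hypothetical helper: pack a VM-entry interruption-information field by hand. */
static uint32_t PackEntryIntInfo(uint8_t uVector, uint8_t uType, int fErrCodeValid)
{
    return (uint32_t)uVector                    /* bits  7:0 - vector */
         | ((uint32_t)uType << 8)               /* bits 10:8 - type (3 = hardware exception) */
         | ((fErrCodeValid ? 1u : 0u) << 11)    /* bit  11   - deliver error code */
         | UINT32_C(0x80000000);                /* bit  31   - valid */
}

int main(void)
{
    /* A #GP (vector 13) with an error code, roughly what vmxHCSetPendingXcptGP queues. */
    printf("#GP entry-int-info=%#x\n", PackEntryIntInfo(13, 3, 1));
    return 0;
}
#endif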
3137
3138
3139/**
3140 * Fixes up attributes for the specified segment register.
3141 *
3142 * @param pVCpu The cross context virtual CPU structure.
3143 * @param pSelReg The segment register that needs fixing.
3144 * @param pszRegName The register name (for logging and assertions).
3145 */
3146static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3147{
3148 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3149
3150 /*
3151 * If VT-x marks the segment as unusable, most other bits remain undefined:
3152 * - For CS the L, D and G bits have meaning.
3153 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3154 * - For the remaining data segments no bits are defined.
3155 *
3156 * The present bit and the unusable bit have been observed to be set at the
3157 * same time (the selector was supposed to be invalid as we started executing
3158 * a V8086 interrupt in ring-0).
3159 *
3160 * What should be important for the rest of the VBox code, is that the P bit is
3161 * cleared. Some of the other VBox code recognizes the unusable bit, but
3162 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3163 * safe side here, we'll strip off P and other bits we don't care about. If
3164 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3165 *
3166 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3167 */
3168#ifdef VBOX_STRICT
3169 uint32_t const uAttr = pSelReg->Attr.u;
3170#endif
3171
3172 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3173 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3174 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3175
3176#ifdef VBOX_STRICT
3177# ifndef IN_NEM_DARWIN
3178 VMMRZCallRing3Disable(pVCpu);
3179# endif
3180 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3181# ifdef DEBUG_bird
3182 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3183 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3184 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3185# endif
3186# ifndef IN_NEM_DARWIN
3187 VMMRZCallRing3Enable(pVCpu);
3188# endif
3189 NOREF(uAttr);
3190#endif
3191 RT_NOREF2(pVCpu, pszRegName);
3192}
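
/*
 * Illustrative sketch (not part of this file's build): the masking in
 * vmxHCFixUnusableSegRegAttr above keeps only type, S, DPL, L, D, G and the
 * unusable bit, clearing P among others.  The stand-alone program below repeats
 * that on a made-up unusable data-segment value; the hex constants restate the
 * assumed VMX access-rights layout (type 3:0, S 4, DPL 6:5, P 7, AVL 12, L 13,
 * D 14, G 15, unusable 16) rather than the real X86DESCATTR_XXX definitions.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const fType     = UINT32_C(0x0000f);
    uint32_t const fS        = UINT32_C(0x00010);
    uint32_t const fDpl      = UINT32_C(0x00060);
    uint32_t const fL        = UINT32_C(0x02000);
    uint32_t const fD        = UINT32_C(0x04000);
    uint32_t const fG        = UINT32_C(0x08000);
    uint32_t const fUnusable = UINT32_C(0x10000);

    uint32_t const fKeep = fUnusable | fL | fD | fG | fDpl | fS | fType;
    uint32_t uAttr = UINT32_C(0x1c093); /* unusable + G + D + P + S + accessed r/w data */
    printf("before=%#x after=%#x (P is gone)\n", uAttr, uAttr & fKeep);
    return 0;
}
#endif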
3193
3194
3195/**
3196 * Imports a guest segment register from the current VMCS into the guest-CPU
3197 * context.
3198 *
3199 * @param pVCpu The cross context virtual CPU structure.
3200 * @tparam a_iSegReg The segment register number (X86_SREG_XXX).
3201 *
3202 * @remarks Called with interrupts and/or preemption disabled.
3203 */
3204template<uint32_t const a_iSegReg>
3205DECLINLINE(void) vmxHCImportGuestSegReg(PVMCPUCC pVCpu)
3206{
3207 AssertCompile(a_iSegReg < X86_SREG_COUNT);
3208 /* Check that the macros we depend upon here and in the export parent function work: */
3209#define MY_SEG_VMCS_FIELD(a_FieldPrefix, a_FieldSuff) \
3210 ( a_iSegReg == X86_SREG_ES ? a_FieldPrefix ## ES ## a_FieldSuff \
3211 : a_iSegReg == X86_SREG_CS ? a_FieldPrefix ## CS ## a_FieldSuff \
3212 : a_iSegReg == X86_SREG_SS ? a_FieldPrefix ## SS ## a_FieldSuff \
3213 : a_iSegReg == X86_SREG_DS ? a_FieldPrefix ## DS ## a_FieldSuff \
3214 : a_iSegReg == X86_SREG_FS ? a_FieldPrefix ## FS ## a_FieldSuff \
3215 : a_iSegReg == X86_SREG_GS ? a_FieldPrefix ## GS ## a_FieldSuff : 0)
3216 AssertCompile(VMX_VMCS_GUEST_SEG_BASE(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS_GUEST_,_BASE));
3217 AssertCompile(VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS16_GUEST_,_SEL));
3218 AssertCompile(VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_LIMIT));
3219 AssertCompile(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg) == MY_SEG_VMCS_FIELD(VMX_VMCS32_GUEST_,_ACCESS_RIGHTS));
3220
3221 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[a_iSegReg];
3222
3223 uint16_t u16Sel;
3224 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(a_iSegReg), &u16Sel); AssertRC(rc);
3225 pSelReg->Sel = u16Sel;
3226 pSelReg->ValidSel = u16Sel;
3227
3228 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(a_iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3229 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(a_iSegReg), &pSelReg->u64Base); AssertRC(rc);
3230
3231 uint32_t u32Attr;
3232 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(a_iSegReg), &u32Attr); AssertRC(rc);
3233 pSelReg->Attr.u = u32Attr;
3234 if (u32Attr & X86DESCATTR_UNUSABLE)
3235 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + a_iSegReg * 3);
3236
3237 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3238}
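
/*
 * Illustrative sketch (not part of this file's build): the call above picks the
 * register name out of the packed literal "ES\0CS\0SS\0DS\0FS\0GS" - every name
 * is two characters plus its terminator, so indexing at a_iSegReg * 3 lands on a
 * properly NUL-terminated string.  The stand-alone program below demonstrates the
 * trick; the ES=0..GS=5 ordering matches the usual X86_SREG_XXX numbering and is
 * restated here as an assumption.
 */
#if 0
# include <stdio.h>

int main(void)
{
    static const char s_szNames[] = "ES\0CS\0SS\0DS\0FS\0GS";
    for (unsigned iSeg = 0; iSeg < 6; iSeg++)
        printf("seg %u -> %s\n", iSeg, s_szNames + iSeg * 3);
    return 0;
}
#endif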
3239
3240
3241/**
3242 * Imports the guest LDTR from the VMCS into the guest-CPU context.
3243 *
3244 * @param pVCpu The cross context virtual CPU structure.
3245 *
3246 * @remarks Called with interrupts and/or preemption disabled.
3247 */
3248DECL_FORCE_INLINE(void) vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3249{
3250 uint16_t u16Sel;
3251 uint64_t u64Base;
3252 uint32_t u32Limit, u32Attr;
3253 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3255 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3256 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3257
3258 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3259 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3260 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3261 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3262 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3263 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3264 if (u32Attr & X86DESCATTR_UNUSABLE)
3265 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3266}
3267
3268
3269/**
3270 * Imports the guest TR from the VMCS into the guest-CPU context.
3271 *
3272 * @param pVCpu The cross context virtual CPU structure.
3273 *
3274 * @remarks Called with interrupts and/or preemption disabled.
3275 */
3276DECL_FORCE_INLINE(void) vmxHCImportGuestTr(PVMCPUCC pVCpu)
3277{
3278 uint16_t u16Sel;
3279 uint64_t u64Base;
3280 uint32_t u32Limit, u32Attr;
3281 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3282 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3283 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3284 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3285
3286 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3287 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3288 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3289 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3290 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3291 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3292 /* TR is the only selector that can never be unusable. */
3293 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3294}
3295
3296
3297/**
3298 * Core: Imports the guest RIP from the VMCS into the guest-CPU context.
3299 *
3300 * @returns The RIP value.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 *
3303 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3304 * @remarks Do -not- call this function directly!
3305 */
3306DECL_FORCE_INLINE(uint64_t) vmxHCImportGuestCoreRip(PVMCPUCC pVCpu)
3307{
3308 uint64_t u64Val;
3309 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3310 AssertRC(rc);
3311
3312 pVCpu->cpum.GstCtx.rip = u64Val;
3313
3314 return u64Val;
3315}
3316
3317
3318/**
3319 * Imports the guest RIP from the VMCS into the guest-CPU context.
3320 *
3321 * @param pVCpu The cross context virtual CPU structure.
3322 *
3323 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3324 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3325 * instead!!!
3326 */
3327DECL_FORCE_INLINE(void) vmxHCImportGuestRip(PVMCPUCC pVCpu)
3328{
3329 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
3330 {
3331 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3332 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3333 }
3334}
3335
3336
3337/**
3338 * Core: Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3339 *
3340 * @param pVCpu The cross context virtual CPU structure.
3341 * @param pVmcsInfo The VMCS info. object.
3342 *
3343 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3344 * @remarks Do -not- call this function directly!
3345 */
3346DECL_FORCE_INLINE(void) vmxHCImportGuestCoreRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3347{
3348 uint64_t fRFlags;
3349 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &fRFlags);
3350 AssertRC(rc);
3351
3352 Assert((fRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK);
3353 Assert((fRFlags & ~(uint64_t)(X86_EFL_1 | X86_EFL_LIVE_MASK)) == 0);
3354
3355 pVCpu->cpum.GstCtx.rflags.u = fRFlags;
3356#ifndef IN_NEM_DARWIN
3357 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3358 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
3359 { /* most likely */ }
3360 else
3361 {
3362 pVCpu->cpum.GstCtx.eflags.Bits.u1VM = 0;
3363 pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3364 }
3365#else
3366 RT_NOREF(pVmcsInfo);
3367#endif
3368}
3369
3370
3371/**
3372 * Imports the guest RFLAGS from the VMCS into the guest-CPU context.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcsInfo The VMCS info. object.
3376 *
3377 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3378 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3379 * instead!!!
3380 */
3381DECL_FORCE_INLINE(void) vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3382{
3383 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RFLAGS)
3384 {
3385 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3386 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3387 }
3388}
3389
3390
3391#ifndef IN_NEM_DARWIN
3392/**
3393 * Imports the guest TSX AUX and certain other MSRs from the VMCS into the guest-CPU
3394 * context.
3395 *
3396 * The other MSRs are in the VM-exit MSR-store.
3397 *
3398 * @returns VBox status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param pVmcsInfo The VMCS info. object.
3401 * @param fEFlags Saved EFLAGS for restoring the interrupt flag (in case of
3402 * unexpected errors). Ignored in NEM/darwin context.
3403 */
3404DECL_FORCE_INLINE(int) vmxHCImportGuestTscAuxAndOtherMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3405{
3406 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3407 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3408 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3409 Assert(pMsrs);
3410 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3411 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3412 for (uint32_t i = 0; i < cMsrs; i++)
3413 {
3414 uint32_t const idMsr = pMsrs[i].u32Msr;
3415 switch (idMsr)
3416 {
3417 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3418 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3419 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3420 default:
3421 {
3422 uint32_t idxLbrMsr;
3423 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3424 if (VM_IS_VMX_LBR(pVM))
3425 {
3426 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3427 {
3428 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3429 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3430 break;
3431 }
3432 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3433 {
3434 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3435 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3436 break;
3437 }
3438 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3439 {
3440 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3441 break;
3442 }
3443 /* Fallthru (no break) */
3444 }
3445 pVCpu->cpum.GstCtx.fExtrn = 0;
3446 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3447 ASMSetFlags(fEFlags);
3448 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3449 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3450 }
3451 }
3452 }
3453 return VINF_SUCCESS;
3454}
3455#endif /* !IN_NEM_DARWIN */
3456
3457
3458/**
3459 * Imports the guest CR0 from the VMCS into the guest-CPU context.
3460 *
3461 * @param pVCpu The cross context virtual CPU structure.
3462 * @param pVmcsInfo The VMCS info. object.
3463 */
3464DECL_FORCE_INLINE(void) vmxHCImportGuestCr0(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3465{
3466 uint64_t u64Cr0;
3467 uint64_t u64Shadow;
3468 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3469 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3470#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3471 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3472 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3473#else
3474 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
3475 {
3476 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3477 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3478 }
3479 else
3480 {
3481 /*
3482 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3483 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3484 * re-construct CR0. See @bugref{9180#c95} for details.
3485 */
3486 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3487 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3488 u64Cr0 = (u64Cr0 & ~(pVmcsInfoGst->u64Cr0Mask & pVmcsNstGst->u64Cr0Mask.u))
3489 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3490 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3491 Assert(u64Cr0 & X86_CR0_NE);
3492 }
3493#endif
3494
3495#ifndef IN_NEM_DARWIN
3496 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3497#endif
3498 CPUMSetGuestCR0(pVCpu, u64Cr0);
3499#ifndef IN_NEM_DARWIN
3500 VMMRZCallRing3Enable(pVCpu);
3501#endif
3502}
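
/*
 * Illustrative sketch (not part of this file's build): in the non-nested path of
 * vmxHCImportGuestCr0 above, bits set in the CR0 guest/host mask are host-owned
 * and the guest's intended value for them lives in the read shadow, so the
 * guest-visible CR0 is stitched together from both VMCS fields.  The stand-alone
 * program below shows that merge with plain integers; the helper name and the
 * example values are made up.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

/* Hypothetical helper: merge the VMCS guest CR0 with the read shadow under a guest/host mask. */
static uint64_t MergeCr0(uint64_t uVmcsCr0, uint64_t uReadShadow, uint64_t fGstHostMask)
{
    /* Guest-owned bits (mask=0) come from the VMCS guest CR0,
       host-owned bits (mask=1) from the read shadow. */
    return (uVmcsCr0 & ~fGstHostMask) | (uReadShadow & fGstHostMask);
}

int main(void)
{
    uint64_t const fMask   = UINT64_C(0x80000021); /* say the host owns PG, NE and PE */
    uint64_t const uVmcs   = UINT64_C(0x80000033); /* what the CPU really runs with */
    uint64_t const uShadow = UINT64_C(0x00000011); /* what the guest thinks it wrote */
    printf("guest view of CR0 = %#llx\n", (unsigned long long)MergeCr0(uVmcs, uShadow, fMask));
    return 0;
}
#endif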
3503
3504
3505/**
3506 * Imports the guest CR3 from the VMCS into the guest-CPU context.
3507 *
3508 * @param pVCpu The cross context virtual CPU structure.
3509 */
3510DECL_FORCE_INLINE(void) vmxHCImportGuestCr3(PVMCPUCC pVCpu)
3511{
3512 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3513 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3514
3515 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3516 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3517 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3518 && CPUMIsGuestPagingEnabledEx(pCtx)))
3519 {
3520 uint64_t u64Cr3;
3521 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3522 if (pCtx->cr3 != u64Cr3)
3523 {
3524 pCtx->cr3 = u64Cr3;
3525 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3526 }
3527
3528 /*
3529 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3530 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3531 */
3532 if (CPUMIsGuestInPAEModeEx(pCtx))
3533 {
3534 X86PDPE aPaePdpes[4];
3535 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3536 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3537 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3538 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3539 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3540 {
3541 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3542 /* PGM now updates PAE PDPTEs while updating CR3. */
3543 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3544 }
3545 }
3546 }
3547}
3548
3549
3550/**
3551 * Imports the guest CR4 from the VMCS into the guest-CPU context.
3552 *
3553 * @param pVCpu The cross context virtual CPU structure.
3554 * @param pVmcsInfo The VMCS info. object.
3555 */
3556DECL_FORCE_INLINE(void) vmxHCImportGuestCr4(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3557{
3558 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3559 uint64_t u64Cr4;
3560 uint64_t u64Shadow;
3561 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3562 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3563#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3564 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3565 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3566#else
3567 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3568 {
3569 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3570 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3571 }
3572 else
3573 {
3574 /*
3575 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3576 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3577 * re-construct CR4. See @bugref{9180#c95} for details.
3578 */
3579 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3580 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3581 u64Cr4 = (u64Cr4 & ~(pVmcsInfo->u64Cr4Mask & pVmcsNstGst->u64Cr4Mask.u))
3582 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3583 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3584 Assert(u64Cr4 & X86_CR4_VMXE);
3585 }
3586#endif
3587 pCtx->cr4 = u64Cr4;
3588}
3589
3590
3591/**
3592 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
3593 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
3594 */
3595DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
3596{
3597 /*
3598 * We must import RIP here to set our EM interrupt-inhibited state.
3599 * We also import RFLAGS as our code that evaluates pending interrupts
3600 * before VM-entry requires it.
3601 */
3602 vmxHCImportGuestRip(pVCpu);
3603 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3604
3605 CPUMUpdateInterruptShadowSsStiEx(&pVCpu->cpum.GstCtx,
3606 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
3607 RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
3608 pVCpu->cpum.GstCtx.rip);
3609 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
3610}
3611
3612
3613/**
3614 * Imports the guest interruptibility-state from the VMCS into the guest-CPU
3615 * context.
3616 *
3617 * @note May import RIP and RFLAGS if interrupt or NMI are blocked.
3618 *
3619 * @param pVCpu The cross context virtual CPU structure.
3620 * @param pVmcsInfo The VMCS info. object.
3621 *
3622 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3623 * do not log!
3624 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3625 * instead!!!
3626 */
3627DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3628{
3629 uint32_t u32Val;
3630 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3631 if (!u32Val)
3632 {
3633 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
3634 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
3635 }
3636 else
3637 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
3638}
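
/*
 * Illustrative sketch (not part of this file's build): the interruptibility word
 * read above is a small bit field; the bit positions below restate the layout
 * assumed from the Intel SDM (bit 0 blocking by STI, bit 1 blocking by MOV SS,
 * bit 2 blocking by SMI, bit 3 blocking by NMI).  The stand-alone program decodes
 * an example value the same way the slow path feeds CPUM.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const fIntState = UINT32_C(0x9); /* example: blocking by STI and by NMI */
    bool const fBlockSti   = (fIntState & UINT32_C(0x1)) != 0;
    bool const fBlockMovSS = (fIntState & UINT32_C(0x2)) != 0;
    bool const fBlockNmi   = (fIntState & UINT32_C(0x8)) != 0;
    printf("STI=%d MOVSS=%d NMI=%d\n", fBlockSti, fBlockMovSS, fBlockNmi);
    return 0;
}
#endif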
3639
3640
3641/**
3642 * Worker for VMXR0ImportStateOnDemand.
3643 *
3644 * @returns VBox status code.
3645 * @param pVCpu The cross context virtual CPU structure.
3646 * @param pVmcsInfo The VMCS info. object.
3647 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3648 */
3649static int vmxHCImportGuestStateEx(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3650{
3651 int rc = VINF_SUCCESS;
3652 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3653 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3654 uint32_t u32Val;
3655
3656 /*
3657 * Note! This is a hack to work around a mysterious BSOD observed with release builds
3658 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3659 * neither are other host platforms.
3660 *
3661 * Committing this temporarily as it prevents BSOD.
3662 *
3663 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3664 */
3665#ifdef RT_OS_WINDOWS
3666 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3667 return VERR_HM_IPE_1;
3668#endif
3669
3670 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3671
3672#ifndef IN_NEM_DARWIN
3673 /*
3674 * We disable interrupts to make the updating of the state and in particular
3675 * the fExtrn modification atomic wrt preemption hooks.
3676 */
3677 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3678#endif
3679
3680 fWhat &= pCtx->fExtrn;
3681 if (fWhat)
3682 {
3683 do
3684 {
3685 if (fWhat & CPUMCTX_EXTRN_RIP)
3686 vmxHCImportGuestRip(pVCpu);
3687
3688 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3689 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3690
3691 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3692 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3693 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3694
3695 if (fWhat & CPUMCTX_EXTRN_RSP)
3696 {
3697 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3698 AssertRC(rc);
3699 }
3700
3701 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3702 {
3703 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3704#ifndef IN_NEM_DARWIN
3705 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3706#else
3707 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3708#endif
3709 if (fWhat & CPUMCTX_EXTRN_CS)
3710 {
3711 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3712 vmxHCImportGuestRip(pVCpu); /** @todo WTF? */
3713 if (fRealOnV86Active)
3714 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3715 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3716 }
3717 if (fWhat & CPUMCTX_EXTRN_SS)
3718 {
3719 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3720 if (fRealOnV86Active)
3721 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3722 }
3723 if (fWhat & CPUMCTX_EXTRN_DS)
3724 {
3725 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3726 if (fRealOnV86Active)
3727 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3728 }
3729 if (fWhat & CPUMCTX_EXTRN_ES)
3730 {
3731 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3732 if (fRealOnV86Active)
3733 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3734 }
3735 if (fWhat & CPUMCTX_EXTRN_FS)
3736 {
3737 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3738 if (fRealOnV86Active)
3739 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3740 }
3741 if (fWhat & CPUMCTX_EXTRN_GS)
3742 {
3743 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3744 if (fRealOnV86Active)
3745 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3746 }
3747 }
3748
3749 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3750 {
3751 if (fWhat & CPUMCTX_EXTRN_LDTR)
3752 vmxHCImportGuestLdtr(pVCpu);
3753
3754 if (fWhat & CPUMCTX_EXTRN_GDTR)
3755 {
3756 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3757 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3758 pCtx->gdtr.cbGdt = u32Val;
3759 }
3760
3761 /* Guest IDTR. */
3762 if (fWhat & CPUMCTX_EXTRN_IDTR)
3763 {
3764 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3765 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3766 pCtx->idtr.cbIdt = u32Val;
3767 }
3768
3769 /* Guest TR. */
3770 if (fWhat & CPUMCTX_EXTRN_TR)
3771 {
3772#ifndef IN_NEM_DARWIN
3773 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3774 don't need to import that one. */
3775 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3776#endif
3777 vmxHCImportGuestTr(pVCpu);
3778 }
3779 }
3780
3781 if (fWhat & CPUMCTX_EXTRN_DR7)
3782 {
3783#ifndef IN_NEM_DARWIN
3784 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3785#endif
3786 {
3787 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3788 AssertRC(rc);
3789 }
3790 }
3791
3792 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3793 {
3794 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3795 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3796 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3797 pCtx->SysEnter.cs = u32Val;
3798 }
3799
3800#ifndef IN_NEM_DARWIN
3801 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3802 {
3803 if ( pVM->hmr0.s.fAllow64BitGuests
3804 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3805 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3806 }
3807
3808 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3809 {
3810 if ( pVM->hmr0.s.fAllow64BitGuests
3811 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3812 {
3813 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3814 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3815 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3816 }
3817 }
3818
3819 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3820 {
3821 rc = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
3822 AssertRCReturn(rc, rc);
3823 }
3824#endif
3825
3826 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3827 {
3828 if (fWhat & CPUMCTX_EXTRN_CR0)
3829 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
3830
3831 if (fWhat & CPUMCTX_EXTRN_CR4)
3832 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
3833
3834 if (fWhat & CPUMCTX_EXTRN_CR3)
3835 vmxHCImportGuestCr3(pVCpu);
3836 }
3837
3838#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3839 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3840 {
3841 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3842 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3843 {
3844 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3845 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3846 if (RT_SUCCESS(rc))
3847 { /* likely */ }
3848 else
3849 break;
3850 }
3851 }
3852#endif
3853 } while (0);
3854
3855 if (RT_SUCCESS(rc))
3856 {
3857 /* Update fExtrn. */
3858 pCtx->fExtrn &= ~fWhat;
3859
3860 /* If everything has been imported, clear the HM keeper bit. */
3861 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3862 {
3863#ifndef IN_NEM_DARWIN
3864 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3865#else
3866 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3867#endif
3868 Assert(!pCtx->fExtrn);
3869 }
3870 }
3871 }
3872#ifndef IN_NEM_DARWIN
3873 else
3874 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3875
3876 /*
3877 * Restore interrupts.
3878 */
3879 ASMSetFlags(fEFlags);
3880#endif
3881
3882 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3883
3884 if (RT_SUCCESS(rc))
3885 { /* likely */ }
3886 else
3887 return rc;
3888
3889 /*
3890 * Honor any pending CR3 updates.
3891 *
3892 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3893 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3894 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3895 *
3896 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3897 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3898 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3899 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3900 *
3901 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3902 *
3903 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3904 */
3905 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3906#ifndef IN_NEM_DARWIN
3907 && VMMRZCallRing3IsEnabled(pVCpu)
3908#endif
3909 )
3910 {
3911 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3912 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3913 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3914 }
3915
3916 return VINF_SUCCESS;
3917}
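
/*
 * Illustrative sketch (not part of this file's build): the fExtrn bookkeeping at
 * the end of the worker above is plain mask arithmetic - clear the bits that were
 * just imported and, once nothing the VMCS still owns remains, drop the keeper
 * bit as well.  The stand-alone program below mirrors that flow; the bit values
 * are made up and do not correspond to the real CPUMCTX_EXTRN_XXX constants.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint64_t const fExtrnRip    = UINT64_C(0x0001);
    uint64_t const fExtrnRflags = UINT64_C(0x0002);
    uint64_t const fExtrnAll    = fExtrnRip | fExtrnRflags;
    uint64_t const fKeeperHm    = UINT64_C(0x8000);

    uint64_t fExtrn = fExtrnAll | fKeeperHm;   /* everything still lives in the VMCS */
    uint64_t fWhat  = fExtrnRip | fExtrnRflags;

    fExtrn &= ~fWhat;                          /* these are now up to date in the context */
    if (!(fExtrn & fExtrnAll))                 /* nothing VMCS-owned left to fetch */
        fExtrn &= ~fKeeperHm;
    printf("fExtrn=%#llx\n", (unsigned long long)fExtrn);
    return 0;
}
#endif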
3918
3919
3920/**
3921 * Internal state fetcher, inner version where we fetch all of a_fWhat.
3922 *
3923 * @returns VBox status code.
3924 * @param pVCpu The cross context virtual CPU structure.
3925 * @param pVmcsInfo The VMCS info. object.
3926 * @param fEFlags Saved EFLAGS for restoring the interrupt flag. Ignored
3927 * in NEM/darwin context.
3928 * @tparam a_fWhat What to import, zero or more bits from
3929 * HMVMX_CPUMCTX_EXTRN_ALL.
3930 */
3931template<uint64_t const a_fWhat>
3932static int vmxHCImportGuestStateInner(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fEFlags)
3933{
3934 Assert(a_fWhat != 0); /* No AssertCompile as the assertion probably kicks in before the compiler (clang) discards it. */
3935 AssertCompile(!(a_fWhat & ~HMVMX_CPUMCTX_EXTRN_ALL));
3936 Assert( (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == a_fWhat
3937 || (pVCpu->cpum.GstCtx.fExtrn & a_fWhat) == (a_fWhat & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
3938
3939 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3940
3941 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3942
3943 /* RIP and RFLAGS may have been imported already by the post exit code
3944 together with the CPUMCTX_EXTRN_INHIBIT_INT/NMI state, in which case
3945 this part of the code has nothing left to do. */
3946 if ( (a_fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3947 && pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
3948 {
3949 if (a_fWhat & CPUMCTX_EXTRN_RFLAGS)
3950 vmxHCImportGuestCoreRFlags(pVCpu, pVmcsInfo);
3951
3952 if (a_fWhat & CPUMCTX_EXTRN_RIP)
3953 {
3954 if (!(a_fWhat & CPUMCTX_EXTRN_CS))
3955 EMHistoryUpdatePC(pVCpu, vmxHCImportGuestCoreRip(pVCpu), false);
3956 else
3957 vmxHCImportGuestCoreRip(pVCpu);
3958 }
3959 }
3960
3961 /* Note! vmxHCImportGuestIntrState may also include RIP and RFLAGS and update fExtrn. */
3962 if (a_fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3963 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3964
3965 if (a_fWhat & (CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TR))
3966 {
3967 if (a_fWhat & CPUMCTX_EXTRN_CS)
3968 {
3969 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
3970 /** @todo try to get rid of this carp, it smells and is probably never ever
3971 * used: */
3972 if ( !(a_fWhat & CPUMCTX_EXTRN_RIP)
3973 && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP))
3974 {
3975 vmxHCImportGuestCoreRip(pVCpu);
3976 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
3977 }
3978 EMHistoryUpdatePC(pVCpu, pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, true /* fFlattened */);
3979 }
3980 if (a_fWhat & CPUMCTX_EXTRN_SS)
3981 vmxHCImportGuestSegReg<X86_SREG_SS>(pVCpu);
3982 if (a_fWhat & CPUMCTX_EXTRN_DS)
3983 vmxHCImportGuestSegReg<X86_SREG_DS>(pVCpu);
3984 if (a_fWhat & CPUMCTX_EXTRN_ES)
3985 vmxHCImportGuestSegReg<X86_SREG_ES>(pVCpu);
3986 if (a_fWhat & CPUMCTX_EXTRN_FS)
3987 vmxHCImportGuestSegReg<X86_SREG_FS>(pVCpu);
3988 if (a_fWhat & CPUMCTX_EXTRN_GS)
3989 vmxHCImportGuestSegReg<X86_SREG_GS>(pVCpu);
3990
3991 /* Guest TR.
3992 Real-mode emulation using virtual-8086 mode has the fake TSS
3993 (pRealModeTSS) in TR, don't need to import that one. */
3994#ifndef IN_NEM_DARWIN
3995 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmcsInfo->pShared;
3996 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3997 if ((a_fWhat & CPUMCTX_EXTRN_TR) && !fRealOnV86Active)
3998#else
3999 if (a_fWhat & CPUMCTX_EXTRN_TR)
4000#endif
4001 vmxHCImportGuestTr(pVCpu);
4002
4003#ifndef IN_NEM_DARWIN /* NEM/Darwin: HV supports only unrestricted guest execution. */
4004 if (fRealOnV86Active)
4005 {
4006 if (a_fWhat & CPUMCTX_EXTRN_CS)
4007 pVCpu->cpum.GstCtx.cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
4008 if (a_fWhat & CPUMCTX_EXTRN_SS)
4009 pVCpu->cpum.GstCtx.ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
4010 if (a_fWhat & CPUMCTX_EXTRN_DS)
4011 pVCpu->cpum.GstCtx.ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
4012 if (a_fWhat & CPUMCTX_EXTRN_ES)
4013 pVCpu->cpum.GstCtx.es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
4014 if (a_fWhat & CPUMCTX_EXTRN_FS)
4015 pVCpu->cpum.GstCtx.fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
4016 if (a_fWhat & CPUMCTX_EXTRN_GS)
4017 pVCpu->cpum.GstCtx.gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
4018 }
4019#endif
4020 }
4021
4022 if (a_fWhat & CPUMCTX_EXTRN_RSP)
4023 {
4024 int const rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pVCpu->cpum.GstCtx.rsp);
4025 AssertRC(rc);
4026 }
4027
4028 if (a_fWhat & CPUMCTX_EXTRN_LDTR)
4029 vmxHCImportGuestLdtr(pVCpu);
4030
4031 if (a_fWhat & CPUMCTX_EXTRN_GDTR)
4032 {
4033 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pVCpu->cpum.GstCtx.gdtr.pGdt); AssertRC(rc1);
4034 uint32_t u32Val;
4035 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc2);
4036 pVCpu->cpum.GstCtx.gdtr.cbGdt = (uint16_t)u32Val;
4037 }
4038
4039 /* Guest IDTR. */
4040 if (a_fWhat & CPUMCTX_EXTRN_IDTR)
4041 {
4042 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pVCpu->cpum.GstCtx.idtr.pIdt); AssertRC(rc1);
4043 uint32_t u32Val;
4044 int const rc2 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc2);
4045 pVCpu->cpum.GstCtx.idtr.cbIdt = (uint16_t)u32Val;
4046 }
4047
4048 if (a_fWhat & CPUMCTX_EXTRN_DR7)
4049 {
4050#ifndef IN_NEM_DARWIN
4051 if (!pVCpu->hmr0.s.fUsingHyperDR7)
4052#endif
4053 {
4054 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pVCpu->cpum.GstCtx.dr[7]);
4055 AssertRC(rc);
4056 }
4057 }
4058
4059 if (a_fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
4060 {
4061 int const rc1 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pVCpu->cpum.GstCtx.SysEnter.eip); AssertRC(rc1);
4062 int const rc2 = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pVCpu->cpum.GstCtx.SysEnter.esp); AssertRC(rc2);
4063 uint32_t u32Val;
4064 int const rc3 = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc3);
4065 pVCpu->cpum.GstCtx.SysEnter.cs = u32Val;
4066 }
4067
4068#ifndef IN_NEM_DARWIN
4069 if (a_fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
4070 {
4071 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4072 && pVM->hmr0.s.fAllow64BitGuests)
4073 pVCpu->cpum.GstCtx.msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
4074 }
4075
4076 if (a_fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
4077 {
4078 if ( (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
4079 && pVM->hmr0.s.fAllow64BitGuests)
4080 {
4081 pVCpu->cpum.GstCtx.msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
4082 pVCpu->cpum.GstCtx.msrSTAR = ASMRdMsr(MSR_K6_STAR);
4083 pVCpu->cpum.GstCtx.msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
4084 }
4085 }
4086
4087 if (a_fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
4088 {
4089 int const rc1 = vmxHCImportGuestTscAuxAndOtherMsrs(pVCpu, pVmcsInfo, fEFlags);
4090 AssertRCReturn(rc1, rc1);
4091 }
4092#endif
4093
4094 if (a_fWhat & CPUMCTX_EXTRN_CR0)
4095 vmxHCImportGuestCr0(pVCpu, pVmcsInfo);
4096
4097 if (a_fWhat & CPUMCTX_EXTRN_CR4)
4098 vmxHCImportGuestCr4(pVCpu, pVmcsInfo);
4099
4100 if (a_fWhat & CPUMCTX_EXTRN_CR3)
4101 vmxHCImportGuestCr3(pVCpu);
4102
4103#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4104 if (a_fWhat & CPUMCTX_EXTRN_HWVIRT)
4105 {
4106 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4107 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
4108 {
4109 Assert(CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx));
4110 int const rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
4111 AssertRCReturn(rc, rc);
4112 }
4113 }
4114#endif
4115
4116 /* Update fExtrn. */
4117 pVCpu->cpum.GstCtx.fExtrn &= ~a_fWhat;
4118
4119 /* If everything has been imported, clear the HM keeper bit. */
4120 if (!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
4121 {
4122#ifndef IN_NEM_DARWIN
4123 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
4124#else
4125 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
4126#endif
4127 Assert(!pVCpu->cpum.GstCtx.fExtrn);
4128 }
4129
4130 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
4131
4132 /*
4133 * Honor any pending CR3 updates.
4134 *
4135 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
4136 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
4137 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
4138 *
4139 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
4140 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
4141 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
4142 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
4143 *
4144 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
4145 *
4146 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
4147 */
4148#ifndef IN_NEM_DARWIN
4149 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4150 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu))
4151 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) || !VMMRZCallRing3IsEnabled(pVCpu) )
4152 return VINF_SUCCESS;
4153 ASMSetFlags(fEFlags);
4154#else
4155 if (!(a_fWhat & CPUMCTX_EXTRN_CR3)
4156 ? RT_LIKELY(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4157 : !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3) )
4158 return VINF_SUCCESS;
4159 RT_NOREF_PV(fEFlags);
4160#endif
4161
4162 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
4163 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4164 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
4165 return VINF_SUCCESS;
4166}
4167
4168
4169/**
4170 * Internal state fetcher.
4171 *
4172 * @returns VBox status code.
4173 * @param pVCpu The cross context virtual CPU structure.
4174 * @param pVmcsInfo The VMCS info. object.
4175 * @param pszCaller For logging.
4176 * @tparam a_fWhat What needs to be imported, CPUMCTX_EXTRN_XXX.
4177 * @tparam a_fDoneLocal What's ASSUMED to have been retrieved locally
4178 * already. This is ORed together with @a a_fWhat when
4179 * calculating what needs fetching (just for safety).
4180 * @tparam a_fDonePostExit What's ASSUMED to have been retrieved by
4181 * hmR0VmxPostRunGuest()/nemR3DarwinHandleExitCommon()
4182 * already. This is ORed together with @a a_fWhat when
4183 * calculating what needs fetching (just for safety).
4184 */
4185template<uint64_t const a_fWhat,
4186 uint64_t const a_fDoneLocal = 0,
4187 uint64_t const a_fDonePostExit = 0
4188#ifndef IN_NEM_DARWIN
4189 | CPUMCTX_EXTRN_INHIBIT_INT
4190 | CPUMCTX_EXTRN_INHIBIT_NMI
4191# if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
4192 | HMVMX_CPUMCTX_EXTRN_ALL
4193# elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
4194 | CPUMCTX_EXTRN_RFLAGS
4195# endif
4196#else /* IN_NEM_DARWIN */
4197 | CPUMCTX_EXTRN_ALL /** @todo optimize */
4198#endif /* IN_NEM_DARWIN */
4199>
4200DECLINLINE(int) vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, const char *pszCaller)
4201{
4202 RT_NOREF_PV(pszCaller);
4203 if ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL)
4204 {
4205#ifndef IN_NEM_DARWIN
4206 /*
4207 * We disable interrupts to make the updating of the state and in particular
4208 * the fExtrn modification atomic wrt preemption hooks.
4209 */
4210 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
4211#else
4212 RTCCUINTREG const fEFlags = 0;
4213#endif
4214
4215 /*
4216 * We combine all three parameters and take the (probably) inlined optimized
4217 * code path for the new things specified in a_fWhat.
4218 *
4219 * As a tweak to deal with exits that have INHIBIT_INT/NMI active, causing
4220 * vmxHCImportGuestIntrState to automatically fetch both RIP & RFLAGS, we
4221 * also take the streamlined path when both of these are cleared in fExtrn
4222 * already. vmxHCImportGuestStateInner checks fExtrn before fetching. This
4223 * helps with MWAIT and HLT exits that always inhibit IRQs on many platforms.
4224 */
4225 uint64_t const fWhatToDo = pVCpu->cpum.GstCtx.fExtrn
4226 & ((a_fWhat | a_fDoneLocal | a_fDonePostExit) & HMVMX_CPUMCTX_EXTRN_ALL);
4227 if (RT_LIKELY( ( fWhatToDo == (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit))
4228 || fWhatToDo == ( a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)
4229 & ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)) /* fetch with INHIBIT_INT/NMI */))
4230 && (a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL & ~(a_fDoneLocal | a_fDonePostExit)) != 0 /* just in case */)
4231 {
4232 int const rc = vmxHCImportGuestStateInner< a_fWhat
4233 & HMVMX_CPUMCTX_EXTRN_ALL
4234 & ~(a_fDoneLocal | a_fDonePostExit)>(pVCpu, pVmcsInfo, fEFlags);
4235#ifndef IN_NEM_DARWIN
4236 ASMSetFlags(fEFlags);
4237#endif
4238 return rc;
4239 }
4240
4241#ifndef IN_NEM_DARWIN
4242 ASMSetFlags(fEFlags);
4243#endif
4244
4245 /*
4246 * We shouldn't normally get here, but it may happen when executing
4247 * in the debug run-loops. Typically, everything should already have
4248 * been fetched then. Otherwise call the fallback state import function.
4249 */
4250 if (fWhatToDo == 0)
4251 { /* hope the cause was the debug loop or something similar */ }
4252 else
4253 {
4254 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestStateFallback);
4255 Log11Func(("a_fWhat=%#RX64/%#RX64/%#RX64 fExtrn=%#RX64 => %#RX64 - Taking inefficient code path from %s!\n",
4256 a_fWhat & HMVMX_CPUMCTX_EXTRN_ALL, a_fDoneLocal & HMVMX_CPUMCTX_EXTRN_ALL,
4257 a_fDonePostExit & HMVMX_CPUMCTX_EXTRN_ALL, pVCpu->cpum.GstCtx.fExtrn, fWhatToDo, pszCaller));
4258 return vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, a_fWhat | a_fDoneLocal | a_fDonePostExit);
4259 }
4260 }
4261 return VINF_SUCCESS;
4262}
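
/*
 * Illustrative sketch (not part of this file's build): vmxHCImportGuestState above
 * folds a_fWhat, a_fDoneLocal and a_fDonePostExit into a compile-time constant so
 * each instantiation of the inner fetcher only contains the branches it can hit.
 * The stand-alone C++ program below shows the idea on a toy context; all names and
 * flag values are made up.
 */
#if 0
# include <cstdint>
# include <cstdio>

static uint64_t const MY_EXTRN_RIP = 1;
static uint64_t const MY_EXTRN_RSP = 2;

struct MyCtx { uint64_t rip; uint64_t rsp; uint64_t fExtrn; };

/* The mask is a template parameter, so branches for bits not requested vanish at compile time. */
template<uint64_t const a_fWhat>
static void MyImport(MyCtx *pCtx)
{
    if (a_fWhat & MY_EXTRN_RIP)
        pCtx->rip = 0x1000; /* stand-in for a VMCS read */
    if (a_fWhat & MY_EXTRN_RSP)
        pCtx->rsp = 0x2000;
    pCtx->fExtrn &= ~a_fWhat;
}

int main()
{
    MyCtx Ctx = { 0, 0, MY_EXTRN_RIP | MY_EXTRN_RSP };
    MyImport<MY_EXTRN_RIP>(&Ctx); /* only the RIP branch is compiled into this instance */
    std::printf("rip=%#llx fExtrn=%#llx\n", (unsigned long long)Ctx.rip, (unsigned long long)Ctx.fExtrn);
    return 0;
}
#endif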
4263
4264
4265/**
4266 * Check per-VM and per-VCPU force flag actions that require us to go back to
4267 * ring-3 for one reason or another.
4268 *
4269 * @returns Strict VBox status code (i.e. informational status codes too)
4270 * @retval VINF_SUCCESS if we don't have any actions that require going back to
4271 * ring-3.
4272 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
4273 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
4274 * interrupts)
4275 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
4276 * all EMTs to be in ring-3.
4277 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
4278 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
4279 * to the EM loop.
4280 *
4281 * @param pVCpu The cross context virtual CPU structure.
4282 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
4283 * @param fStepping Whether we are single-stepping the guest using the
4284 * hypervisor debugger.
4285 *
4286 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
4287 * is no longer in VMX non-root mode.
4288 */
4289static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
4290{
4291#ifndef IN_NEM_DARWIN
4292 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4293#endif
4294
4295 /*
4296 * Update pending interrupts into the APIC's IRR.
4297 */
4298 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4299 APICUpdatePendingInterrupts(pVCpu);
4300
4301 /*
4302 * Anything pending? Should be more likely than not if we're doing a good job.
4303 */
4304 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4305 if ( !fStepping
4306 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
4307 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
4308 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
4309 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4310 return VINF_SUCCESS;
4311
4312 /* Pending PGM CR3 sync. */
4313 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4314 {
4315 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4316 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
4317 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
4318 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4319 if (rcStrict != VINF_SUCCESS)
4320 {
4321 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
4322 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
4323 return rcStrict;
4324 }
4325 }
4326
4327 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4328 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4329 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4330 {
4331 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
4332 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4333 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d (fVM=%#RX64 fCpu=%#RX64)\n",
4334 rc, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4335 return rc;
4336 }
4337
4338 /* Pending VM request packets, such as hardware interrupts. */
4339 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4340 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4341 {
4342 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
4343 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4344 return VINF_EM_PENDING_REQUEST;
4345 }
4346
4347 /* Pending PGM pool flushes. */
4348 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4349 {
4350 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
4351 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4352 return VINF_PGM_POOL_FLUSH_PENDING;
4353 }
4354
4355 /* Pending DMA requests. */
4356 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4357 {
4358 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
4359 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4360 return VINF_EM_RAW_TO_R3;
4361 }
4362
4363#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4364 /*
4365 * Pending nested-guest events.
4366 *
4367 * Please note the priority of these events is specified and important.
4368 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4369 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
4370 */
4371 if (fIsNestedGuest)
4372 {
4373 /* Pending nested-guest APIC-write. */
4374 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4375 {
4376 Log4Func(("Pending nested-guest APIC-write\n"));
4377 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
4378 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4379 return rcStrict;
4380 }
4381
4382 /* Pending nested-guest monitor-trap flag (MTF). */
4383 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
4384 {
4385 Log4Func(("Pending nested-guest MTF\n"));
4386 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
4387 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4388 return rcStrict;
4389 }
4390
4391 /* Pending nested-guest VMX-preemption timer expired. */
4392 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
4393 {
4394 Log4Func(("Pending nested-guest preempt timer\n"));
4395 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
4396 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4397 return rcStrict;
4398 }
4399 }
4400#else
4401 NOREF(fIsNestedGuest);
4402#endif
4403
4404 return VINF_SUCCESS;
4405}
4406
4407
4408/**
4409 * Converts any TRPM trap into a pending HM event. This is typically used when
4410 * entering from ring-3 (not when returning from a longjmp).
4411 *
4412 * @param pVCpu The cross context virtual CPU structure.
4413 */
4414static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
4415{
4416 Assert(TRPMHasTrap(pVCpu));
4417 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4418
4419 uint8_t uVector;
4420 TRPMEVENT enmTrpmEvent;
4421 uint32_t uErrCode;
4422 RTGCUINTPTR GCPtrFaultAddress;
4423 uint8_t cbInstr;
4424 bool fIcebp;
4425
4426 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4427 AssertRC(rc);
4428
4429 uint32_t u32IntInfo;
4430 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4431 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4432
4433 rc = TRPMResetTrap(pVCpu);
4434 AssertRC(rc);
4435 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4436 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4437
4438 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4439}
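
/*
 * Layout note (informational): the 32-bit interruption / IDT-vectoring information word
 * assembled above follows the usual VMX layout from the Intel SDM:
 *   bits  7:0  - vector
 *   bits 10:8  - type (external interrupt, NMI, hardware/software exception, ...)
 *   bit  11    - error-code valid
 *   bit  31    - valid
 * So, roughly, a #BP (INT3) queued by TRPM ends up as
 *   X86_XCPT_BP | (software-exception type in bits 10:8) | RT_BIT_32(31)
 * with the exact encoding supplied by HMTrpmEventTypeToVmxEventType() and the
 * VMX_IDT_VECTORING_INFO_* macros.
 */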
4440
4441
4442/**
4443 * Converts the pending HM event into a TRPM trap.
4444 *
4445 * @param pVCpu The cross context virtual CPU structure.
4446 */
4447static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4448{
4449 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4450
4451 /* If a trap was already pending, we did something wrong! */
4452 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4453
4454 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4455 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4456 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4457
4458 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4459
4460 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4461 AssertRC(rc);
4462
4463 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4464 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4465
4466 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4467 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4468 else
4469 {
4470 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4471 switch (uVectorType)
4472 {
4473 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4474 TRPMSetTrapDueToIcebp(pVCpu);
4475 RT_FALL_THRU();
4476 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4477 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4478 {
4479 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4480 || ( uVector == X86_XCPT_BP /* INT3 */
4481 || uVector == X86_XCPT_OF /* INTO */
4482 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4483 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4484 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4485 break;
4486 }
4487 }
4488 }
4489
4490 /* We're now done converting the pending event. */
4491 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4492}
4493
4494
4495/**
4496 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4497 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4498 *
4499 * @param pVCpu The cross context virtual CPU structure.
4500 * @param pVmcsInfo The VMCS info. object.
4501 */
4502static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4503{
4504 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4505 {
4506 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4507 {
4508 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4509 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4510 AssertRC(rc);
4511 }
4512 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4513}
4514
4515
4516/**
4517 * Clears the interrupt-window exiting control in the VMCS.
4518 *
4519 * @param pVCpu The cross context virtual CPU structure.
4520 * @param pVmcsInfo The VMCS info. object.
4521 */
4522DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4523{
4524 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4525 {
4526 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4527 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4528 AssertRC(rc);
4529 }
4530}
4531
4532
4533/**
4534 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4535 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4536 *
4537 * @param pVCpu The cross context virtual CPU structure.
4538 * @param pVmcsInfo The VMCS info. object.
4539 */
4540static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4541{
4542 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4543 {
4544 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4545 {
4546 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4547 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4548 AssertRC(rc);
4549 Log4Func(("Setup NMI-window exiting\n"));
4550 }
4551 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4552}
4553
4554
4555/**
4556 * Clears the NMI-window exiting control in the VMCS.
4557 *
4558 * @param pVCpu The cross context virtual CPU structure.
4559 * @param pVmcsInfo The VMCS info. object.
4560 */
4561DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4562{
4563 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4564 {
4565 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4566 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4567 AssertRC(rc);
4568 }
4569}
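
/*
 * Illustrative sketch (hypothetical helper, not part of the build): the four window-exiting
 * helpers above all follow the same cached read-modify-write pattern for the primary
 * processor-based VM-execution controls:
 *
 *   static void vmxHCSetProcCtl(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t fCtl)
 *   {
 *       if (!(pVmcsInfo->u32ProcCtls & fCtl))
 *       {
 *           pVmcsInfo->u32ProcCtls |= fCtl;
 *           int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
 *           AssertRC(rc);
 *       }
 *   }
 *
 * The cached u32ProcCtls must always mirror the VMCS, which is why the VMCS write is only
 * issued when the cached value actually changes.
 */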
4570
4571
4572/**
4573 * Injects an event into the guest upon VM-entry by updating the relevant fields
4574 * in the VM-entry area in the VMCS.
4575 *
4576 * @returns Strict VBox status code (i.e. informational status codes too).
4577 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4578 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4579 *
4580 * @param pVCpu The cross context virtual CPU structure.
4581 * @param pVmcsInfo The VMCS info object.
4582 * @param fIsNestedGuest Flag whether this is for a pending nested guest event.
4583 * @param pEvent The event being injected.
4584 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4585 * will be updated if necessary. This cannot be NULL.
4586 * @param fStepping Whether we're single-stepping guest execution and should
4587 * return VINF_EM_DBG_STEPPED if the event is injected
4588 * directly (registers modified by us, not by hardware on
4589 * VM-entry).
4590 */
4591static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent,
4592 bool fStepping, uint32_t *pfIntrState)
4593{
4594 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4595 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4596 Assert(pfIntrState);
4597
4598#ifdef IN_NEM_DARWIN
4599 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4600#endif
4601
4602 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4603 uint32_t u32IntInfo = pEvent->u64IntInfo;
4604 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4605 uint32_t const cbInstr = pEvent->cbInstr;
4606 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4607 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4608 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4609
4610#ifdef VBOX_STRICT
4611 /*
4612 * Validate the error-code-valid bit for hardware exceptions.
4613 * No error codes for exceptions in real-mode.
4614 *
4615 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4616 */
4617 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4618 && !CPUMIsGuestInRealModeEx(pCtx))
4619 {
4620 switch (uVector)
4621 {
4622 case X86_XCPT_PF:
4623 case X86_XCPT_DF:
4624 case X86_XCPT_TS:
4625 case X86_XCPT_NP:
4626 case X86_XCPT_SS:
4627 case X86_XCPT_GP:
4628 case X86_XCPT_AC:
4629 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4630 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4631 RT_FALL_THRU();
4632 default:
4633 break;
4634 }
4635 }
4636
4637 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4638 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4639 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4640#endif
4641
4642 RT_NOREF(uVector);
4643 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4644 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4645 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4646 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4647 {
4648 Assert(uVector <= X86_XCPT_LAST);
4649 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4650 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4651 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4652 }
4653 else
4654 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4655
4656 /*
4657 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4658 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4659 * interrupt handler in the (real-mode) guest.
4660 *
4661 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4662 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4663 */
4664 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4665 {
4666#ifndef IN_NEM_DARWIN
4667 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4668#endif
4669 {
4670 /*
4671 * For CPUs with unrestricted guest execution enabled and with the guest
4672 * in real-mode, we must not set the deliver-error-code bit.
4673 *
4674 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4675 */
4676 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4677 }
4678#ifndef IN_NEM_DARWIN
4679 else
4680 {
4681 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4682 Assert(PDMVmmDevHeapIsEnabled(pVM));
4683 Assert(pVM->hm.s.vmx.pRealModeTSS);
4684 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4685
4686 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4687 int rc2 = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4688 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4689 AssertRCReturn(rc2, rc2);
4690
4691 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4692 size_t const cbIdtEntry = sizeof(X86IDTR16);
4693 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4694 {
4695 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4696 if (uVector == X86_XCPT_DF)
4697 return VINF_EM_RESET;
4698
4699 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4700 No error codes for exceptions in real-mode. */
4701 if (uVector == X86_XCPT_GP)
4702 {
4703 static HMEVENT const s_EventXcptDf
4704 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4705 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4706 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4707 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4708 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptDf, fStepping, pfIntrState);
4709 }
4710
4711 /*
4712 * If we're injecting an event with no valid IDT entry, inject a #GP.
4713 * No error codes for exceptions in real-mode.
4714 *
4715 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4716 */
4717 static HMEVENT const s_EventXcptGp
4718 = HMEVENT_INIT_ONLY_INT_INFO( RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4719 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4720 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4721 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1));
4722 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &s_EventXcptGp, fStepping, pfIntrState);
4723 }
4724
4725 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4726 uint16_t uGuestIp = pCtx->ip;
4727 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4728 {
4729 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4730 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
4731 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4732 }
4733 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4734 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4735
4736 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4737 X86IDTR16 IdtEntry;
4738 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4739 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4740 AssertRCReturn(rc2, rc2);
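      /* Note: a real-mode IVT entry (X86IDTR16) is 4 bytes - a 16-bit handler offset followed
         by a 16-bit code-segment selector - so GCPhysIdtEntry above is simply the IDT base
         plus vector * 4. */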
4741
4742 /* Construct the stack frame for the interrupt/exception handler. */
4743 VBOXSTRICTRC rcStrict;
4744 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, (uint16_t)pCtx->eflags.u);
4745 if (rcStrict == VINF_SUCCESS)
4746 {
4747 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4748 if (rcStrict == VINF_SUCCESS)
4749 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4750 }
4751
4752 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4753 if (rcStrict == VINF_SUCCESS)
4754 {
4755 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4756 pCtx->rip = IdtEntry.offSel;
4757 pCtx->cs.Sel = IdtEntry.uSel;
4758 pCtx->cs.ValidSel = IdtEntry.uSel;
4759 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
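      /* (cbIdtEntry is 4 here, so the shift above amounts to the usual real-mode segment
         base computation, i.e. selector * 16.) */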
4760 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4761 && uVector == X86_XCPT_PF)
4762 pCtx->cr2 = GCPtrFault;
4763
4764 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4765 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4766 | HM_CHANGED_GUEST_RSP);
4767
4768 /*
4769 * If we delivered a hardware exception (other than an NMI) and if there was
4770 * block-by-STI in effect, we should clear it.
4771 */
4772 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4773 {
4774 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4775 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4776 Log4Func(("Clearing inhibition due to STI\n"));
4777 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4778 }
4779
4780 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4781 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4782
4783 /*
4784 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4785 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4786 */
4787 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4788
4789 /*
4790 * If we eventually support nested-guest execution without unrestricted guest execution,
4791 * we should set fInterceptEvents here.
4792 */
4793 Assert(!fIsNestedGuest);
4794
4795 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4796 if (fStepping)
4797 rcStrict = VINF_EM_DBG_STEPPED;
4798 }
4799 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4800 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4801 return rcStrict;
4802 }
4803#else
4804 RT_NOREF(pVmcsInfo);
4805#endif
4806 }
4807
4808 /*
4809 * Validate.
4810 */
4811 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4812 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4813
4814 /*
4815 * Inject the event into the VMCS.
4816 */
4817 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4818 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4819 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4820 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4821 AssertRC(rc);
4822
4823 /*
4824 * Update guest CR2 if this is a page-fault.
4825 */
4826 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4827 pCtx->cr2 = GCPtrFault;
4828
4829 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4830 return VINF_SUCCESS;
4831}
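
/*
 * Call-flow note (informational): vmxHCEvaluatePendingEvent() below decides *what* to deliver
 * and records it as the pending HM event, after which vmxHCInjectPendingEvent() calls
 * vmxHCInjectEventVmcs() above to program the VM-entry interruption fields (or, for
 * real-on-v86 guests without unrestricted execution, to dispatch directly via the IVT).
 */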
4832
4833
4834/**
4835 * Evaluates the event to be delivered to the guest and sets it as the pending
4836 * event.
4837 *
4838 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4839 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4840 * NOT restore these force-flags.
4841 *
4842 * @returns Strict VBox status code (i.e. informational status codes too).
4843 * @param pVCpu The cross context virtual CPU structure.
4844 * @param pVmcsInfo The VMCS information structure.
4845 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4846 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4847 */
4848static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4849{
4850 Assert(pfIntrState);
4851 Assert(!TRPMHasTrap(pVCpu));
4852
4853 /*
4854 * Compute/update guest-interruptibility state related FFs.
4855 * The FFs will be used below while evaluating events to be injected.
4856 */
4857 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4858
4859 /*
4860 * Evaluate if a new event needs to be injected.
4861 * An event that's already pending has already been subjected to all the necessary checks.
4862 */
4863 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4864 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
4865 {
4866 /** @todo SMI. SMIs take priority over NMIs. */
4867
4868 /*
4869 * NMIs.
4870 * NMIs take priority over external interrupts.
4871 */
4872#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4873 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4874#endif
4875 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4876 {
4877 /*
4878 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4879 *
4880 * For a nested-guest, the FF always indicates the outer guest's ability to
4881 * receive an NMI while the guest-interruptibility state bit depends on whether
4882 * the nested-hypervisor is using virtual-NMIs.
4883 */
4884 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
4885 {
4886#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4887 if ( fIsNestedGuest
4888 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4889 return IEMExecVmxVmexitXcptNmi(pVCpu);
4890#endif
4891 vmxHCSetPendingXcptNmi(pVCpu);
4892 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4893 Log4Func(("NMI pending injection\n"));
4894
4895 /* We've injected the NMI, bail. */
4896 return VINF_SUCCESS;
4897 }
4898 if (!fIsNestedGuest)
4899 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4900 }
4901
4902 /*
4903 * External interrupts (PIC/APIC).
4904 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4905 * We cannot re-request the interrupt from the controller again.
4906 */
4907 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4908 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4909 {
4910 Assert(!DBGFIsStepping(pVCpu));
4911 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4912 AssertRC(rc);
4913
4914 /*
4915 * We must not check EFLAGS directly when executing a nested-guest, use
4916 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
4917 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4918 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4919 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4920 *
4921 * See Intel spec. 25.4.1 "Event Blocking".
4922 */
4923 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4924 {
4925#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4926 if ( fIsNestedGuest
4927 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4928 {
4929 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4930 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4931 return rcStrict;
4932 }
4933#endif
4934 uint8_t u8Interrupt;
4935 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4936 if (RT_SUCCESS(rc))
4937 {
4938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4939 if ( fIsNestedGuest
4940 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4941 {
4942 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4943 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4944 return rcStrict;
4945 }
4946#endif
4947 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4948 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4949 }
4950 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4951 {
4952 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4953
4954 if ( !fIsNestedGuest
4955 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4956 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4957 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
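      /* (The threshold passed above is the interrupt's priority class, i.e. the upper four
         bits of the vector, hence the ">> 4".) */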
4958
4959 /*
4960 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4961 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4962 * need to re-set this force-flag here.
4963 */
4964 }
4965 else
4966 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4967
4968 /* We've injected the interrupt or taken necessary action, bail. */
4969 return VINF_SUCCESS;
4970 }
4971 if (!fIsNestedGuest)
4972 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4973 }
4974 }
4975 else if (!fIsNestedGuest)
4976 {
4977 /*
4978 * An event is being injected or we are in an interrupt shadow. Check if another event is
4979 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4980 * the pending event.
4981 */
4982 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4983 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4984 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4985 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4986 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4987 }
4988 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4989
4990 return VINF_SUCCESS;
4991}
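
/*
 * Informational note: the evaluation order above mirrors the priority used here - (SMIs,
 * still a todo) before NMIs before PIC/APIC external interrupts - and each branch either
 * records exactly one pending event or arms the corresponding NMI/interrupt-window exiting
 * control so that we get a VM-exit as soon as the guest can accept the event.
 */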
4992
4993
4994/**
4995 * Injects any pending events into the guest if the guest is in a state to
4996 * receive them.
4997 *
4998 * @returns Strict VBox status code (i.e. informational status codes too).
4999 * @param pVCpu The cross context virtual CPU structure.
5000 * @param pVmcsInfo The VMCS information structure.
5001 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
5002 * @param fIntrState The VT-x guest-interruptibility state.
5003 * @param fStepping Whether we are single-stepping the guest using the
5004 * hypervisor debugger and should return
5005 * VINF_EM_DBG_STEPPED if the event was dispatched
5006 * directly.
5007 */
5008static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest,
5009 uint32_t fIntrState, bool fStepping)
5010{
5011 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
5012#ifndef IN_NEM_DARWIN
5013 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5014#endif
5015
5016#ifdef VBOX_STRICT
5017 /*
5018 * Verify guest-interruptibility state.
5019 *
5020 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
5021 * since injecting an event may modify the interruptibility state and we must thus always
5022 * use fIntrState.
5023 */
5024 {
5025 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5026 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
5027 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
5028 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5029 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
5030 Assert(!TRPMHasTrap(pVCpu));
5031 NOREF(fBlockMovSS); NOREF(fBlockSti);
5032 }
5033#endif
5034
5035 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5036 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
5037 {
5038 /*
5039 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
5040 * pending even while injecting an event and in this case, we want a VM-exit as soon as
5041 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
5042 *
5043 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
5044 */
5045 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
5046#ifdef VBOX_STRICT
5047 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5048 {
5049 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5050 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5051 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5052 }
5053 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5054 {
5055 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
5056 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
5057 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
5058 }
5059#endif
5060 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5061 uIntType));
5062
5063 /*
5064 * Inject the event and get any changes to the guest-interruptibility state.
5065 *
5066 * The guest-interruptibility state may need to be updated if we inject the event
5067 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
5068 */
5069 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
5070 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
5071
5072 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5073 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
5074 else
5075 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
5076 }
5077
5078 /*
5079 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
5080 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
5081 */
5082 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5083 && !fIsNestedGuest)
5084 {
5085 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5086
5087 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
5088 {
5089 /*
5090 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
5091 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
5092 */
5093 Assert(!DBGFIsStepping(pVCpu));
5094 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_TF);
5095 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
5096 fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
5097 AssertRC(rc);
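      /* (BS is bit 14 of the pending debug exceptions field; setting it together with the
         guest's TF makes the CPU deliver the pending single-step #DB once the STI/MOV-SS
         shadow has passed.) */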
5098 }
5099 else
5100 {
5101 /*
5102 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
5103 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
5104 * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
5105 * we use MTF, so just make sure it's called before executing guest code.
5106 */
5107 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
5108 }
5109 }
5110 /* else: for nested-guests this is currently handled while merging controls. */
5111
5112 /*
5113 * Finally, update the guest-interruptibility state.
5114 *
5115 * This is required for the real-on-v86 software interrupt injection, for
5116 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
5117 */
5118 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5119 AssertRC(rc);
5120
5121 /*
5122 * There's no need to clear the VM-entry interruption-information field here if we're not
5123 * injecting anything. VT-x clears the valid bit on every VM-exit.
5124 *
5125 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5126 */
5127
5128 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
5129 return rcStrict;
5130}
5131
5132
5133/**
5134 * Tries to determine what part of the guest-state VT-x has deemed invalid
5135 * and updates the error record fields accordingly.
5136 *
5137 * @returns VMX_IGS_* error codes.
5138 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
5139 * wrong with the guest state.
5140 *
5141 * @param pVCpu The cross context virtual CPU structure.
5142 * @param pVmcsInfo The VMCS info. object.
5143 *
5144 * @remarks This function assumes our cache of the VMCS controls
5145 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
5146 */
5147static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5148{
5149#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
5150#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { uError = (err); break; } else do { } while (0)
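/* (These helpers rely on the enclosing do { ... } while (0) in the function body below: a
   failed check records the VMX_IGS_* code in uError and breaks out of that pseudo-loop.) */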
5151
5152 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5153 uint32_t uError = VMX_IGS_ERROR;
5154 uint32_t u32IntrState = 0;
5155#ifndef IN_NEM_DARWIN
5156 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5157 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
5158#else
5159 bool const fUnrestrictedGuest = true;
5160#endif
5161 do
5162 {
5163 int rc;
5164
5165 /*
5166 * Guest-interruptibility state.
5167 *
5168 * Read this first so that any check that fails before the ones that actually
5169 * require the guest-interruptibility state still reflects the correct
5170 * VMCS value, avoiding further confusion.
5171 */
5172 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
5173 AssertRC(rc);
5174
5175 uint32_t u32Val;
5176 uint64_t u64Val;
5177
5178 /*
5179 * CR0.
5180 */
5181 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5182 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
5183 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
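      /* (Informational: IA32_VMX_CR0_FIXED0 has a 1 for every CR0 bit that must be 1 in VMX
         operation and IA32_VMX_CR0_FIXED1 a 0 for every bit that must be 0, so the AND above
         yields the must-be-one mask (fSetCr0) and the OR the may-be-one mask (fZapCr0).) */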
5184 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
5185 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
5186 if (fUnrestrictedGuest)
5187 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5188
5189 uint64_t u64GuestCr0;
5190 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
5191 AssertRC(rc);
5192 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
5193 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
5194 if ( !fUnrestrictedGuest
5195 && (u64GuestCr0 & X86_CR0_PG)
5196 && !(u64GuestCr0 & X86_CR0_PE))
5197 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
5198
5199 /*
5200 * CR4.
5201 */
5202 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
5203 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
5204 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
5205
5206 uint64_t u64GuestCr4;
5207 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
5208 AssertRC(rc);
5209 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
5210 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
5211
5212 /*
5213 * IA32_DEBUGCTL MSR.
5214 */
5215 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
5216 AssertRC(rc);
5217 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5218 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
5219 {
5220 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
5221 }
5222 uint64_t u64DebugCtlMsr = u64Val;
5223
5224#ifdef VBOX_STRICT
5225 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
5226 AssertRC(rc);
5227 Assert(u32Val == pVmcsInfo->u32EntryCtls);
5228#endif
5229 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5230
5231 /*
5232 * RIP and RFLAGS.
5233 */
5234 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5235 AssertRC(rc);
5236 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
5237 if ( !fLongModeGuest
5238 || !pCtx->cs.Attr.n.u1Long)
5239 {
5240 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
5241 }
5242 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
5243 * must be identical if the "IA-32e mode guest" VM-entry
5244 * control is 1 and CS.L is 1. No check applies if the
5245 * CPU supports 64 linear-address bits. */
5246
5247 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
5248 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5249 AssertRC(rc);
5250 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
5251 VMX_IGS_RFLAGS_RESERVED);
5252 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
5253 uint32_t const u32Eflags = u64Val;
5254
5255 if ( fLongModeGuest
5256 || ( fUnrestrictedGuest
5257 && !(u64GuestCr0 & X86_CR0_PE)))
5258 {
5259 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
5260 }
5261
5262 uint32_t u32EntryInfo;
5263 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
5264 AssertRC(rc);
5265 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5266 {
5267 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
5268 }
5269
5270 /*
5271 * 64-bit checks.
5272 */
5273 if (fLongModeGuest)
5274 {
5275 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
5276 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
5277 }
5278
5279 if ( !fLongModeGuest
5280 && (u64GuestCr4 & X86_CR4_PCIDE))
5281 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
5282
5283 /** @todo CR3 field must be such that bits 63:52 and bits in the range
5284 * 51:32 beyond the processor's physical-address width are 0. */
5285
5286 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5287 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
5288 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
5289
5290#ifndef IN_NEM_DARWIN
5291 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
5292 AssertRC(rc);
5293 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
5294
5295 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
5296 AssertRC(rc);
5297 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
5298#endif
5299
5300 /*
5301 * PERF_GLOBAL MSR.
5302 */
5303 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
5304 {
5305 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
5306 AssertRC(rc);
5307 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
5308 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
5309 }
5310
5311 /*
5312 * PAT MSR.
5313 */
5314 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5315 {
5316 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
5317 AssertRC(rc);
5318 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
5319 for (unsigned i = 0; i < 8; i++)
5320 {
5321 uint8_t u8Val = (u64Val & 0xff);
5322 if ( u8Val != 0 /* UC */
5323 && u8Val != 1 /* WC */
5324 && u8Val != 4 /* WT */
5325 && u8Val != 5 /* WP */
5326 && u8Val != 6 /* WB */
5327 && u8Val != 7 /* UC- */)
5328 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
5329 u64Val >>= 8;
5330 }
5331 }
5332
5333 /*
5334 * EFER MSR.
5335 */
5336 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5337 {
5338 Assert(g_fHmVmxSupportsVmcsEfer);
5339 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
5340 AssertRC(rc);
5341 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
5342 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
5343 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
5344 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
5345 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
5346 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
5347 * iemVmxVmentryCheckGuestState(). */
5348 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5349 || !(u64GuestCr0 & X86_CR0_PG)
5350 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
5351 VMX_IGS_EFER_LMA_LME_MISMATCH);
5352 }
5353
5354 /*
5355 * Segment registers.
5356 */
5357 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5358 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
5359 if (!(u32Eflags & X86_EFL_VM))
5360 {
5361 /* CS */
5362 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
5363 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
5364 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
5365 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5366 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5367 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
5368 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
5369 /* CS cannot be loaded with NULL in protected mode. */
5370 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
5371 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
5372 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5373 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
5374 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5375 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
5376 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
5377 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
5378 else
5379 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
5380
5381 /* SS */
5382 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5383 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
5384 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
5385 if ( !(pCtx->cr0 & X86_CR0_PE)
5386 || pCtx->cs.Attr.n.u4Type == 3)
5387 {
5388 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
5389 }
5390
5391 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5392 {
5393 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
5394 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
5395 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
5396 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
5397 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5398 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5399 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
5400 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
5401 }
5402
5403 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
5404 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5405 {
5406 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
5407 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
5408 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5409 || pCtx->ds.Attr.n.u4Type > 11
5410 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5411 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
5412 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
5413 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5414 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5415 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
5416 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
5417 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5418 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
5419 }
5420 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5421 {
5422 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
5423 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
5424 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5425 || pCtx->es.Attr.n.u4Type > 11
5426 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
5427 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
5428 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5429 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5430 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5431 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5432 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5433 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5434 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5435 }
5436 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5437 {
5438 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5439 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5440 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5441 || pCtx->fs.Attr.n.u4Type > 11
5442 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5443 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5444 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5445 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5446 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5447 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5448 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5449 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5450 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5451 }
5452 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5453 {
5454 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5455 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5456 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5457 || pCtx->gs.Attr.n.u4Type > 11
5458 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5459 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5460 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5461 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5462 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5463 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5464 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5465 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5466 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5467 }
5468 /* 64-bit capable CPUs. */
5469 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5470 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5471 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5472 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5473 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5474 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5475 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5476 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5477 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5478 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5479 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5480 }
5481 else
5482 {
5483 /* V86 mode checks. */
5484 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5485 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5486 {
5487 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5488 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5489 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5490 }
5491 else
5492 {
5493 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5494 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5495 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5496 }
5497
5498 /* CS */
5499 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5500 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5501 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5502 /* SS */
5503 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5504 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5505 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5506 /* DS */
5507 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5508 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5509 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5510 /* ES */
5511 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5512 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5513 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5514 /* FS */
5515 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5516 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5517 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5518 /* GS */
5519 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5520 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5521 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5522 /* 64-bit capable CPUs. */
5523 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5524 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5525 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5526 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5527 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5528 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5529 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5530 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5531 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5532 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5533 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5534 }
5535
5536 /*
5537 * TR.
5538 */
5539 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5540 /* 64-bit capable CPUs. */
5541 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5542 if (fLongModeGuest)
5543 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5544 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5545 else
5546 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5547 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5548 VMX_IGS_TR_ATTR_TYPE_INVALID);
5549 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5550 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5551 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5552 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5553 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5554 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5555 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5556 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5557
5558 /*
5559 * GDTR and IDTR (64-bit capable checks).
5560 */
5561 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5562 AssertRC(rc);
5563 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5564
5565 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5566 AssertRC(rc);
5567 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5568
5569 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5570 AssertRC(rc);
5571 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5572
5573 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5574 AssertRC(rc);
5575 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5576
5577 /*
5578 * Guest Non-Register State.
5579 */
5580 /* Activity State. */
5581 uint32_t u32ActivityState;
5582 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5583 AssertRC(rc);
5584 HMVMX_CHECK_BREAK( !u32ActivityState
5585 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5586 VMX_IGS_ACTIVITY_STATE_INVALID);
5587 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5588 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5589
5590 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5591 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5592 {
5593 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5594 }
5595
5596 /** @todo Activity state and injecting interrupts. Left as a todo since we
5597 * currently don't use activity states other than ACTIVE. */
5598
5599 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5600 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5601
5602 /* Guest interruptibility-state. */
5603 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5604 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5605 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5606 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5607 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5608 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5609 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5610 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5611 {
5612 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5613 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5614 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5615 }
5616 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5617 {
5618 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5619 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5620 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5621 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5622 }
5623 /** @todo Assumes the processor is not in SMM. */
5624 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5625 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5626 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5627 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5628 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5629 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5630 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5631 {
5632 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5633 }
5634
5635 /* Pending debug exceptions. */
5636 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5637 AssertRC(rc);
5638 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5639 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5640 u32Val = u64Val; /* For pending debug exceptions checks below. */
5641
5642 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5643 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5644 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5645 {
5646 if ( (u32Eflags & X86_EFL_TF)
5647 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5648 {
5649 /* Bit 14 is PendingDebug.BS. */
5650 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5651 }
5652 if ( !(u32Eflags & X86_EFL_TF)
5653 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5654 {
5655 /* Bit 14 is PendingDebug.BS. */
5656 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5657 }
5658 }
5659
5660#ifndef IN_NEM_DARWIN
5661 /* VMCS link pointer. */
5662 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5663 AssertRC(rc);
5664 if (u64Val != UINT64_C(0xffffffffffffffff))
5665 {
5666 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5667 /** @todo Bits beyond the processor's physical-address width MBZ. */
5668 /** @todo SMM checks. */
5669 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5670 Assert(pVmcsInfo->pvShadowVmcs);
5671 VMXVMCSREVID VmcsRevId;
5672 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5673 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5674 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5675 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5676 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5677 }
5678
5679 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5680 * not using nested paging? */
5681 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5682 && !fLongModeGuest
5683 && CPUMIsGuestInPAEModeEx(pCtx))
5684 {
5685 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5686 AssertRC(rc);
5687 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5688
5689 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5690 AssertRC(rc);
5691 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5692
5693 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5694 AssertRC(rc);
5695 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5696
5697 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5698 AssertRC(rc);
5699 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5700 }
5701#endif
5702
5703 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5704 if (uError == VMX_IGS_ERROR)
5705 uError = VMX_IGS_REASON_NOT_FOUND;
5706 } while (0);
5707
5708 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5709 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5710 return uError;
5711
5712#undef HMVMX_ERROR_BREAK
5713#undef HMVMX_CHECK_BREAK
5714}
5715
5716
5717#ifndef HMVMX_USE_FUNCTION_TABLE
5718/**
5719 * Handles a guest VM-exit from hardware-assisted VMX execution.
5720 *
5721 * @returns Strict VBox status code (i.e. informational status codes too).
5722 * @param pVCpu The cross context virtual CPU structure.
5723 * @param pVmxTransient The VMX-transient structure.
5724 */
5725DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5726{
5727#ifdef DEBUG_ramshankar
5728# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5729 do { \
5730 if (a_fSave != 0) \
5731 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5732 VBOXSTRICTRC rcStrict = a_CallExpr; \
5733 if (a_fSave != 0) \
5734 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5735 return rcStrict; \
5736 } while (0)
5737#else
5738# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5739#endif
5740 uint32_t const uExitReason = pVmxTransient->uExitReason;
5741 switch (uExitReason)
5742 {
5743 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5744 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5745 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5746 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5747 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5748 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5749 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5750 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5751 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5752 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5753 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5754 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5755 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5756 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5757 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5758 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5759 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5760 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5761 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5762 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5763 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5764 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5765 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5766 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5767 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5768 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5769 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5770 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5771 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5772 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5773#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5774 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5775 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5776 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5777 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5778 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5779 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5780 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5781 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5782 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5783 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5784#else
5785 case VMX_EXIT_VMCLEAR:
5786 case VMX_EXIT_VMLAUNCH:
5787 case VMX_EXIT_VMPTRLD:
5788 case VMX_EXIT_VMPTRST:
5789 case VMX_EXIT_VMREAD:
5790 case VMX_EXIT_VMRESUME:
5791 case VMX_EXIT_VMWRITE:
5792 case VMX_EXIT_VMXOFF:
5793 case VMX_EXIT_VMXON:
5794 case VMX_EXIT_INVVPID:
5795 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5796#endif
5797#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5798 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5799#else
5800 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5801#endif
5802
5803 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5804 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5805 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5806
5807 case VMX_EXIT_INIT_SIGNAL:
5808 case VMX_EXIT_SIPI:
5809 case VMX_EXIT_IO_SMI:
5810 case VMX_EXIT_SMI:
5811 case VMX_EXIT_ERR_MSR_LOAD:
5812 case VMX_EXIT_ERR_MACHINE_CHECK:
5813 case VMX_EXIT_PML_FULL:
5814 case VMX_EXIT_VIRTUALIZED_EOI:
5815 case VMX_EXIT_GDTR_IDTR_ACCESS:
5816 case VMX_EXIT_LDTR_TR_ACCESS:
5817 case VMX_EXIT_APIC_WRITE:
5818 case VMX_EXIT_RDRAND:
5819 case VMX_EXIT_RSM:
5820 case VMX_EXIT_VMFUNC:
5821 case VMX_EXIT_ENCLS:
5822 case VMX_EXIT_RDSEED:
5823 case VMX_EXIT_XSAVES:
5824 case VMX_EXIT_XRSTORS:
5825 case VMX_EXIT_UMWAIT:
5826 case VMX_EXIT_TPAUSE:
5827 case VMX_EXIT_LOADIWKEY:
5828 default:
5829 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5830 }
5831#undef VMEXIT_CALL_RET
5832}
5833#endif /* !HMVMX_USE_FUNCTION_TABLE */
5834
5835
5836#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5837/**
5838 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5839 *
5840 * @returns Strict VBox status code (i.e. informational status codes too).
5841 * @param pVCpu The cross context virtual CPU structure.
5842 * @param pVmxTransient The VMX-transient structure.
5843 */
5844DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5845{
5846#ifdef DEBUG_ramshankar
5847# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5848 do { \
5849 if (a_fSave != 0) \
5850 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__); \
5851 VBOXSTRICTRC rcStrict = a_CallExpr; \
5852 return rcStrict; \
5853 } while (0)
5854#else
5855# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5856#endif
5857
5858 uint32_t const uExitReason = pVmxTransient->uExitReason;
5859 switch (uExitReason)
5860 {
5861# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5862 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient));
5863 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolationNested(pVCpu, pVmxTransient));
5864# else
5865 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5866 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5867# endif
5868 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient));
5869 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstrNested(pVCpu, pVmxTransient));
5870 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHltNested(pVCpu, pVmxTransient));
5871
5872 /*
5873 * We shouldn't direct host physical interrupts to the nested-guest.
5874 */
5875 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5876
5877 /*
5878 * Instructions that cause VM-exits unconditionally, or whose VM-exit
5879 * condition is determined solely by the nested hypervisor (meaning if the
5880 * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
5881 *
5882 * - Provides VM-exit instruction length ONLY.
5883 */
5884 case VMX_EXIT_CPUID: /* Unconditional. */
5885 case VMX_EXIT_VMCALL:
5886 case VMX_EXIT_GETSEC:
5887 case VMX_EXIT_INVD:
5888 case VMX_EXIT_XSETBV:
5889 case VMX_EXIT_VMLAUNCH:
5890 case VMX_EXIT_VMRESUME:
5891 case VMX_EXIT_VMXOFF:
5892 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5893 case VMX_EXIT_VMFUNC:
5894 VMEXIT_CALL_RET(0, vmxHCExitInstrNested(pVCpu, pVmxTransient));
5895
5896 /*
5897 * Instructions that cause VM-exits unconditionally, or whose VM-exit
5898 * condition is determined solely by the nested hypervisor (meaning if the
5899 * VM-exit happens, it's guaranteed to be a nested-guest VM-exit).
5900 *
5901 * - Provides VM-exit instruction length.
5902 * - Provides VM-exit information.
5903 * - Optionally provides Exit qualification.
5904 *
5905 * Since Exit qualification is 0 for all VM-exits where it is not
5906 * applicable, reading and passing it to the guest should produce
5907 * defined behavior.
5908 *
5909 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5910 */
5911 case VMX_EXIT_INVEPT: /* Unconditional. */
5912 case VMX_EXIT_INVVPID:
5913 case VMX_EXIT_VMCLEAR:
5914 case VMX_EXIT_VMPTRLD:
5915 case VMX_EXIT_VMPTRST:
5916 case VMX_EXIT_VMXON:
5917 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5918 case VMX_EXIT_LDTR_TR_ACCESS:
5919 case VMX_EXIT_RDRAND:
5920 case VMX_EXIT_RDSEED:
5921 case VMX_EXIT_XSAVES:
5922 case VMX_EXIT_XRSTORS:
5923 case VMX_EXIT_UMWAIT:
5924 case VMX_EXIT_TPAUSE:
5925 VMEXIT_CALL_RET(0, vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient));
5926
5927 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtscNested(pVCpu, pVmxTransient));
5928 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscpNested(pVCpu, pVmxTransient));
5929 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsrNested(pVCpu, pVmxTransient));
5930 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsrNested(pVCpu, pVmxTransient));
5931 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpgNested(pVCpu, pVmxTransient));
5932 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcidNested(pVCpu, pVmxTransient));
5933 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient));
5934 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvdNested(pVCpu, pVmxTransient));
5935 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtfNested(pVCpu, pVmxTransient));
5936 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccessNested(pVCpu, pVmxTransient));
5937 case VMX_EXIT_APIC_WRITE: VMEXIT_CALL_RET(0, vmxHCExitApicWriteNested(pVCpu, pVmxTransient));
5938 case VMX_EXIT_VIRTUALIZED_EOI: VMEXIT_CALL_RET(0, vmxHCExitVirtEoiNested(pVCpu, pVmxTransient));
5939 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRxNested(pVCpu, pVmxTransient));
5940 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindowNested(pVCpu, pVmxTransient));
5941 case VMX_EXIT_NMI_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitNmiWindowNested(pVCpu, pVmxTransient));
5942 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient));
5943 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwaitNested(pVCpu, pVmxTransient));
5944 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitorNested(pVCpu, pVmxTransient));
5945 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPauseNested(pVCpu, pVmxTransient));
5946
5947 case VMX_EXIT_PREEMPT_TIMER:
5948 {
5949 /** @todo NSTVMX: Preempt timer. */
5950 VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5951 }
5952
5953 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRxNested(pVCpu, pVmxTransient));
5954 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmcNested(pVCpu, pVmxTransient));
5955
5956 case VMX_EXIT_VMREAD:
5957 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient));
5958
5959 case VMX_EXIT_TRIPLE_FAULT: VMEXIT_CALL_RET(0, vmxHCExitTripleFaultNested(pVCpu, pVmxTransient));
5960 case VMX_EXIT_ERR_INVALID_GUEST_STATE: VMEXIT_CALL_RET(0, vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient));
5961
5962 case VMX_EXIT_INIT_SIGNAL:
5963 case VMX_EXIT_SIPI:
5964 case VMX_EXIT_IO_SMI:
5965 case VMX_EXIT_SMI:
5966 case VMX_EXIT_ERR_MSR_LOAD:
5967 case VMX_EXIT_ERR_MACHINE_CHECK:
5968 case VMX_EXIT_PML_FULL:
5969 case VMX_EXIT_RSM:
5970 default:
5971 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5972 }
5973#undef VMEXIT_CALL_RET
5974}
5975#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5976
5977
5978/** @name VM-exit helpers.
5979 * @{
5980 */
5981/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5982/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5983/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5984
5985/** Macro for handling VM-exits that were not expected to occur. */
5986#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5987 do { \
5988 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5989 return VERR_VMX_UNEXPECTED_EXIT; \
5990 } while (0)
5991
5992#ifdef VBOX_STRICT
5993# ifndef IN_NEM_DARWIN
5994/* Is there some generic IPRT define for this that isn't in Runtime/internal/\* ?? */
5995# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5996 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5997
5998# define HMVMX_ASSERT_PREEMPT_CPUID() \
5999 do { \
6000 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6001 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6002 } while (0)
6003
6004# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6005 do { \
6006 AssertPtr((a_pVCpu)); \
6007 AssertPtr((a_pVmxTransient)); \
6008 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6009 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6010 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6011 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6012 Assert((a_pVmxTransient)->pVmcsInfo); \
6013 Assert(ASMIntAreEnabled()); \
6014 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6015 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
6016 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6017 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
6018 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
6019 HMVMX_ASSERT_PREEMPT_CPUID(); \
6020 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6021 } while (0)
6022# else
6023# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
6024# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
6025# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6026 do { \
6027 AssertPtr((a_pVCpu)); \
6028 AssertPtr((a_pVmxTransient)); \
6029 Assert( (a_pVmxTransient)->fVMEntryFailed == false \
6030 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE \
6031 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MSR_LOAD \
6032 || (a_pVmxTransient)->uExitReason == VMX_EXIT_ERR_MACHINE_CHECK); \
6033 Assert((a_pVmxTransient)->pVmcsInfo); \
6034 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
6035 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6036 } while (0)
6037# endif
6038
6039# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6040 do { \
6041 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
6042 Assert((a_pVmxTransient)->fIsNestedGuest); \
6043 } while (0)
6044
6045# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6046 do { \
6047 Log4Func(("\n")); \
6048 } while (0)
6049#else
6050# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6051 do { \
6052 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6053 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
6054 } while (0)
6055
6056# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
6057 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
6058
6059# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
6060#endif
6061
6062#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6063/** Macro that performs the relevant VMX instruction checks for VM-exits that
6064 * occurred due to the guest attempting to execute a VMX instruction. */
6065# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
6066 do \
6067 { \
6068 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
6069 if (rcStrictTmp == VINF_SUCCESS) \
6070 { /* likely */ } \
6071 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6072 { \
6073 Assert((a_pVCpu)->hm.s.Event.fPending); \
6074 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
6075 return VINF_SUCCESS; \
6076 } \
6077 else \
6078 { \
6079 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
6080 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
6081 } \
6082 } while (0)
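
# if 0
/* Illustrative usage sketch only, not part of the build: roughly how a VMX-instruction
 * exit handler would employ the macro above. The handler name and body are hypothetical;
 * the macro either falls through (all checks passed) or returns on behalf of the handler,
 * typically VINF_SUCCESS with a \#UD pending. */
static VBOXSTRICTRC vmxHCExitVmxInstrUsageSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    /* ... instruction-specific emulation (typically handed to IEM) would follow here ... */
    return VINF_SUCCESS;
}
# endif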
6083
6084/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
6085# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
6086 do \
6087 { \
6088 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
6089 (a_pGCPtrEffAddr)); \
6090 if (rcStrictTmp == VINF_SUCCESS) \
6091 { /* likely */ } \
6092 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
6093 { \
6094 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
6095 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
6096 NOREF(uXcptTmp); \
6097 return VINF_SUCCESS; \
6098 } \
6099 else \
6100 { \
6101 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
6102 return rcStrictTmp; \
6103 } \
6104 } while (0)
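
# if 0
/* Illustrative usage sketch only, not part of the build: a fragment showing how an exit
 * handler for a memory-operand VMX instruction (e.g. VMPTRLD) might decode the operand
 * address. The transient-structure fields used here are assumptions for the sketch. */
    RTGCPTR GCPtrVmcs;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                             VMXMEMACCESS_READ, &GCPtrVmcs);
    /* On success the macro falls through and GCPtrVmcs holds the guest-linear operand address. */
# endif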
6105#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6106
6107
6108/**
6109 * Advances the guest RIP by the specified number of bytes.
6110 *
6111 * @param pVCpu The cross context virtual CPU structure.
6112 * @param cbInstr Number of bytes to advance the RIP by.
6113 *
6114 * @remarks No-long-jump zone!!!
6115 */
6116DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
6117{
6118 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
6119
6120 /*
6121 * Advance RIP.
6122 *
6123 * The upper 32 bits are only set when in 64-bit mode, so we have to detect
6124 * when the addition causes a "carry" into the upper half and check whether
6125 * we're in 64-bit mode and can go on with it or whether we should zap the top
6126 * half. (Note! The 8086, 80186 and 80286 emulation is done exclusively in
6127 * IEM, so we don't need to bother with pre-386 16-bit wraparound.)
6128 *
6129 * See PC wrap around tests in bs3-cpu-weird-1.
6130 */
6131 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
6132 uint64_t const uRipNext = uRipPrev + cbInstr;
6133 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & RT_BIT_64(32))
6134 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6135 pVCpu->cpum.GstCtx.rip = uRipNext;
6136 else
6137 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
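 /* Worked example (illustrative only): a 32-bit guest at RIP=0x00000000fffffffe executing a
    4-byte instruction gives uRipNext=0x0000000100000002; the XOR test above sees the bit-32
    "carry" and, since we're not in 64-bit code, the result is truncated to 0x00000002 instead
    of keeping the bogus upper half. */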
6138
6139 /*
6140 * Clear RF and interrupt shadowing.
6141 */
6142 if (RT_LIKELY(!(pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF))))
6143 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
6144 else
6145 {
6146 if ((pVCpu->cpum.GstCtx.eflags.uBoth & (X86_EFL_RF | X86_EFL_TF)) == X86_EFL_TF)
6147 {
6148 /** @todo \#DB - single step. */
6149 }
6150 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
6151 }
6152 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
6153
6154 /* Mark both RIP and RFLAGS as updated. */
6155 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6156}
6157
6158
6159/**
6160 * Advances the guest RIP after reading it from the VMCS.
6161 *
6162 * @returns VBox status code, no informational status codes.
6163 * @param pVCpu The cross context virtual CPU structure.
6164 * @param pVmxTransient The VMX-transient structure.
6165 *
6166 * @remarks No-long-jump zone!!!
6167 */
6168static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6169{
6170 vmxHCReadToTransientSlow<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
6171 /** @todo consider template here after checking callers. */
6172 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6173 AssertRCReturn(rc, rc);
6174
6175 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
6176 return VINF_SUCCESS;
6177}
6178
6179
6180/**
6181 * Handle a condition that occurred while delivering an event through the guest or
6182 * nested-guest IDT.
6183 *
6184 * @returns Strict VBox status code (i.e. informational status codes too).
6185 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6186 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
6187 * to continue execution of the guest, which will deliver the \#DF.
6188 * @retval VINF_EM_RESET if we detected a triple-fault condition.
6189 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
6190 *
6191 * @param pVCpu The cross context virtual CPU structure.
6192 * @param pVmxTransient The VMX-transient structure.
6193 *
6194 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6195 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
6196 * is due to an EPT violation, PML full or SPP-related event.
6197 *
6198 * @remarks No-long-jump zone!!!
6199 */
6200static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6201{
6202 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6203 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6204 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6205 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6206 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6207 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
6208
6209 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6210 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6211 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
6212 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
6213 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
6214 {
6215 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
6216 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
6217
6218 /*
6219 * If the event was a software interrupt (generated with INT n) or a software exception
6220 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
6221 * can handle the VM-exit and continue guest execution which will re-execute the
6222 * instruction rather than re-injecting the exception, as that can cause premature
6223 * trips to ring-3 before injection and involve TRPM which currently has no way of
6224 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
6225 * the problem).
6226 */
6227 IEMXCPTRAISE enmRaise;
6228 IEMXCPTRAISEINFO fRaiseInfo;
6229 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6230 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6231 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6232 {
6233 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
6234 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6235 }
6236 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
6237 {
6238 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
6239 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
6240 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
6241
6242 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
6243 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
6244
6245 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
6246
6247 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
6248 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
6249 {
6250 pVmxTransient->fVectoringPF = true;
6251 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6252 }
6253 }
6254 else
6255 {
6256 /*
6257 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
6258 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
6259 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
6260 */
6261 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6262 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6263 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
6264 enmRaise = IEMXCPTRAISE_PREV_EVENT;
6265 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
6266 }
6267
6268 /*
6269 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
6270 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
6271 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
6272 * subsequent VM-entry would fail, see @bugref{7445}.
6273 *
6274 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
6275 */
6276 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
6277 && enmRaise == IEMXCPTRAISE_PREV_EVENT
6278 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6279 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6280 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6281
6282 switch (enmRaise)
6283 {
6284 case IEMXCPTRAISE_CURRENT_XCPT:
6285 {
6286 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
6287 Assert(rcStrict == VINF_SUCCESS);
6288 break;
6289 }
6290
6291 case IEMXCPTRAISE_PREV_EVENT:
6292 {
6293 uint32_t u32ErrCode;
6294 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
6295 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
6296 else
6297 u32ErrCode = 0;
6298
6299 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
6300 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
6301 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */, u32ErrCode,
6302 pVCpu->cpum.GstCtx.cr2);
6303
6304 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6305 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
6306 Assert(rcStrict == VINF_SUCCESS);
6307 break;
6308 }
6309
6310 case IEMXCPTRAISE_REEXEC_INSTR:
6311 Assert(rcStrict == VINF_SUCCESS);
6312 break;
6313
6314 case IEMXCPTRAISE_DOUBLE_FAULT:
6315 {
6316 /*
6317 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
6318 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
6319 */
6320 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6321 {
6322 pVmxTransient->fVectoringDoublePF = true;
6323 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6324 pVCpu->cpum.GstCtx.cr2));
6325 rcStrict = VINF_SUCCESS;
6326 }
6327 else
6328 {
6329 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
6330 vmxHCSetPendingXcptDF(pVCpu);
6331 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6332 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6333 rcStrict = VINF_HM_DOUBLE_FAULT;
6334 }
6335 break;
6336 }
6337
6338 case IEMXCPTRAISE_TRIPLE_FAULT:
6339 {
6340 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
6341 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
6342 rcStrict = VINF_EM_RESET;
6343 break;
6344 }
6345
6346 case IEMXCPTRAISE_CPU_HANG:
6347 {
6348 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6349 rcStrict = VERR_EM_GUEST_CPU_HANG;
6350 break;
6351 }
6352
6353 default:
6354 {
6355 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6356 rcStrict = VERR_VMX_IPE_2;
6357 break;
6358 }
6359 }
6360 }
6361 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6362 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
6363 {
6364 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
6365 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
6366 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
6367 {
6368 /*
6369 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
6370 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6371 * that virtual NMIs remain blocked until the IRET execution is completed.
6372 *
6373 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
6374 */
6375 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6376 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6377 }
6378 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
6379 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
6380 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
6381 {
6382 /*
6383 * Execution of IRET caused an EPT violation, page-modification log-full event or
6384 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
6385 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
6386 * that virtual NMIs remain blocked until the IRET execution is completed.
6387 *
6388 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
6389 */
6390 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
6391 {
6392 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
6393 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
6394 }
6395 }
6396 }
6397
6398 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6399 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6400 return rcStrict;
6401}
6402
6403
6404#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6405/**
6406 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
6407 * guest attempting to execute a VMX instruction.
6408 *
6409 * @returns Strict VBox status code (i.e. informational status codes too).
6410 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6411 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6412 *
6413 * @param pVCpu The cross context virtual CPU structure.
6414 * @param uExitReason The VM-exit reason.
6415 *
6416 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6417 * @remarks No-long-jump zone!!!
6418 */
6419static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
6420{
6421 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6422 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6423
6424 /*
6425 * The physical CPU would have already checked the CPU mode/code segment.
6426 * We shall just assert here for paranoia.
6427 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
6428 */
6429 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6430 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6431 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
6432
6433 if (uExitReason == VMX_EXIT_VMXON)
6434 {
6435 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6436
6437 /*
6438 * We check CR4.VMXE because it is required to be always set while in VMX operation
6439 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
6440 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6441 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6442 */
6443 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6444 {
6445 Log4Func(("CR4.VMXE is not set -> #UD\n"));
6446 vmxHCSetPendingXcptUD(pVCpu);
6447 return VINF_HM_PENDING_XCPT;
6448 }
6449 }
6450 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
6451 {
6452 /*
6453 * The guest has not entered VMX operation but attempted to execute a VMX instruction
6454 * (other than VMXON), we need to raise a #UD.
6455 */
6456 Log4Func(("Not in VMX root mode -> #UD\n"));
6457 vmxHCSetPendingXcptUD(pVCpu);
6458 return VINF_HM_PENDING_XCPT;
6459 }
6460
6461 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
6462 return VINF_SUCCESS;
6463}
6464
6465
6466/**
6467 * Decodes the memory operand of an instruction that caused a VM-exit.
6468 *
6469 * The Exit qualification field provides the displacement field for memory
6470 * operand instructions, if any.
6471 *
6472 * @returns Strict VBox status code (i.e. informational status codes too).
6473 * @retval VINF_SUCCESS if the operand was successfully decoded.
6474 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6475 * operand.
6476 * @param pVCpu The cross context virtual CPU structure.
6477 * @param uExitInstrInfo The VM-exit instruction information field.
6478 * @param GCPtrDisp The instruction displacement field, if any. For
6479 * RIP-relative addressing pass RIP + displacement here.
6480 * @param enmMemAccess The memory operand's access type (read or write).
6481 * @param pGCPtrMem Where to store the effective destination memory address.
6482 *
6483 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6484 * virtual-8086 mode hence skips those checks while verifying if the
6485 * segment is valid.
6486 */
6487static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6488 PRTGCPTR pGCPtrMem)
6489{
6490 Assert(pGCPtrMem);
6491 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6492 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6493 | CPUMCTX_EXTRN_CR0);
6494
6495 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6496 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6497 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6498
6499 VMXEXITINSTRINFO ExitInstrInfo;
6500 ExitInstrInfo.u = uExitInstrInfo;
6501 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6502 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6503 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6504 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6505 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6506 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6507 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6508 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6509 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6510
6511 /*
6512 * Validate instruction information.
6513 * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
6514 */
6515 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6516 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6517 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6518 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6519 AssertLogRelMsgReturn(fIsMemOperand,
6520 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6521
6522 /*
6523 * Compute the complete effective address.
6524 *
6525 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6526 * See AMD spec. 4.5.2 "Segment Registers".
6527 */
6528 RTGCPTR GCPtrMem = GCPtrDisp;
6529 if (fBaseRegValid)
6530 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6531 if (fIdxRegValid)
6532 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6533
6534 RTGCPTR const GCPtrOff = GCPtrMem;
6535 if ( !fIsLongMode
6536 || iSegReg >= X86_SREG_FS)
6537 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6538 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
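 /* Worked example (illustrative only, hypothetical operand): a 32-bit access through
    [ebx + esi*4 + 0x10] with ebx=0x1000, esi=0x20 and a flat DS base of 0 arrives here as
    GCPtrDisp=0x10, giving GCPtrMem = 0x10 + 0x1000 + (0x20 << 2) = 0x1090, which is then
    masked with 0xffffffff for the 32-bit address size. */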
6539
6540 /*
6541 * Validate effective address.
6542 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6543 */
6544 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6545 Assert(cbAccess > 0);
6546 if (fIsLongMode)
6547 {
6548 if (X86_IS_CANONICAL(GCPtrMem))
6549 {
6550 *pGCPtrMem = GCPtrMem;
6551 return VINF_SUCCESS;
6552 }
6553
6554 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6555 * "Data Limit Checks in 64-bit Mode". */
6556 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6557 vmxHCSetPendingXcptGP(pVCpu, 0);
6558 return VINF_HM_PENDING_XCPT;
6559 }
6560
6561 /*
6562 * This is a watered down version of iemMemApplySegment().
6563 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6564 * and segment CPL/DPL checks are skipped.
6565 */
6566 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6567 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6568 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6569
6570 /* Check if the segment is present and usable. */
6571 if ( pSel->Attr.n.u1Present
6572 && !pSel->Attr.n.u1Unusable)
6573 {
6574 Assert(pSel->Attr.n.u1DescType);
6575 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6576 {
6577 /* Check permissions for the data segment. */
6578 if ( enmMemAccess == VMXMEMACCESS_WRITE
6579 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6580 {
6581 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6582 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6583 return VINF_HM_PENDING_XCPT;
6584 }
6585
6586 /* Check limits if it's a normal data segment. */
6587 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6588 {
6589 if ( GCPtrFirst32 > pSel->u32Limit
6590 || GCPtrLast32 > pSel->u32Limit)
6591 {
6592 Log4Func(("Data segment limit exceeded. "
6593 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6594 GCPtrLast32, pSel->u32Limit));
6595 if (iSegReg == X86_SREG_SS)
6596 vmxHCSetPendingXcptSS(pVCpu, 0);
6597 else
6598 vmxHCSetPendingXcptGP(pVCpu, 0);
6599 return VINF_HM_PENDING_XCPT;
6600 }
6601 }
6602 else
6603 {
6604 /* Check limits if it's an expand-down data segment.
6605 Note! The upper boundary is defined by the B bit, not the G bit! */
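 /* E.g. (illustrative): with u32Limit=0xfff the valid offsets are 0x1000..0xffffffff
    when the B bit is set, but only 0x1000..0xffff when it is clear. */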
6606 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6607 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6608 {
6609 Log4Func(("Expand-down data segment limit exceeded. "
6610 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6611 GCPtrLast32, pSel->u32Limit));
6612 if (iSegReg == X86_SREG_SS)
6613 vmxHCSetPendingXcptSS(pVCpu, 0);
6614 else
6615 vmxHCSetPendingXcptGP(pVCpu, 0);
6616 return VINF_HM_PENDING_XCPT;
6617 }
6618 }
6619 }
6620 else
6621 {
6622 /* Check permissions for the code segment. */
6623 if ( enmMemAccess == VMXMEMACCESS_WRITE
6624 || ( enmMemAccess == VMXMEMACCESS_READ
6625 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6626 {
6627 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6628 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6629 vmxHCSetPendingXcptGP(pVCpu, 0);
6630 return VINF_HM_PENDING_XCPT;
6631 }
6632
6633 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6634 if ( GCPtrFirst32 > pSel->u32Limit
6635 || GCPtrLast32 > pSel->u32Limit)
6636 {
6637 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6638 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6639 if (iSegReg == X86_SREG_SS)
6640 vmxHCSetPendingXcptSS(pVCpu, 0);
6641 else
6642 vmxHCSetPendingXcptGP(pVCpu, 0);
6643 return VINF_HM_PENDING_XCPT;
6644 }
6645 }
6646 }
6647 else
6648 {
6649 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6650 vmxHCSetPendingXcptGP(pVCpu, 0);
6651 return VINF_HM_PENDING_XCPT;
6652 }
6653
6654 *pGCPtrMem = GCPtrMem;
6655 return VINF_SUCCESS;
6656}
6657#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6658
6659
6660/**
6661 * VM-exit helper for LMSW.
6662 */
6663static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6664{
6665 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6666 AssertRCReturn(rc, rc);
6667
6668 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6669 AssertMsg( rcStrict == VINF_SUCCESS
6670 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6671
6672 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6673 if (rcStrict == VINF_IEM_RAISED_XCPT)
6674 {
6675 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6676 rcStrict = VINF_SUCCESS;
6677 }
6678
6679 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6680 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6681 return rcStrict;
6682}
6683
6684
6685/**
6686 * VM-exit helper for CLTS.
6687 */
6688static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6689{
6690 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6691 AssertRCReturn(rc, rc);
6692
6693 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6694 AssertMsg( rcStrict == VINF_SUCCESS
6695 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6696
6697 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6698 if (rcStrict == VINF_IEM_RAISED_XCPT)
6699 {
6700 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6701 rcStrict = VINF_SUCCESS;
6702 }
6703
6704 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6705 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6706 return rcStrict;
6707}
6708
6709
6710/**
6711 * VM-exit helper for MOV from CRx (CRx read).
6712 */
6713static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6714{
6715 Assert(iCrReg < 16);
6716 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6717
6718 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
6719 AssertRCReturn(rc, rc);
6720
6721 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6722 AssertMsg( rcStrict == VINF_SUCCESS
6723 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6724
6725 if (iGReg == X86_GREG_xSP)
6726 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6727 else
6728 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6729#ifdef VBOX_WITH_STATISTICS
6730 switch (iCrReg)
6731 {
6732 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6733 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6734 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6735 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6736 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6737 }
6738#endif
6739 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6740 return rcStrict;
6741}
6742
6743
6744/**
6745 * VM-exit helper for MOV to CRx (CRx write).
6746 */
6747static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6748{
6749 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6750
6751 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6752 AssertMsg( rcStrict == VINF_SUCCESS
6753 || rcStrict == VINF_IEM_RAISED_XCPT
6754 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6755
6756 switch (iCrReg)
6757 {
6758 case 0:
6759 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6760 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6761 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6762 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6763 break;
6764
6765 case 2:
6766 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6767 /* Nothing to do here; CR2 is not part of the VMCS. */
6768 break;
6769
6770 case 3:
6771 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6772 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6773 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6774 break;
6775
6776 case 4:
6777 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6778 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6779#ifndef IN_NEM_DARWIN
6780 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6781 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6782#else
6783 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6784#endif
6785 break;
6786
6787 case 8:
6788 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6789 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6790 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6791 break;
6792
6793 default:
6794 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6795 break;
6796 }
6797
6798 if (rcStrict == VINF_IEM_RAISED_XCPT)
6799 {
6800 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6801 rcStrict = VINF_SUCCESS;
6802 }
6803 return rcStrict;
6804}
6805
6806
6807/**
6808 * VM-exit exception handler for \#PF (Page-fault exception).
6809 *
6810 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6811 */
6812static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6813{
6814 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6815 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
6816
6817#ifndef IN_NEM_DARWIN
6818 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6819 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6820 { /* likely */ }
6821 else
6822#endif
6823 {
6824#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6825 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6826#endif
6827 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6828 if (!pVmxTransient->fVectoringDoublePF)
6829 {
6830 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6831 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6832 }
6833 else
6834 {
6835 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6836 Assert(!pVmxTransient->fIsNestedGuest);
6837 vmxHCSetPendingXcptDF(pVCpu);
6838 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6839 }
6840 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6841 return VINF_SUCCESS;
6842 }
6843
6844 Assert(!pVmxTransient->fIsNestedGuest);
6845
6846 /* If it's a vectoring #PF, re-inject the original event as PGMTrap0eHandler() is incapable
6847 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6848 if (pVmxTransient->fVectoringPF)
6849 {
6850 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6851 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6852 }
6853
6854 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6855 AssertRCReturn(rc, rc);
6856
6857 Log4Func(("#PF: cs:rip=%#04x:%08RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
6858 pVCpu->cpum.GstCtx.rip, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pVCpu->cpum.GstCtx.cr3));
6859
6860 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6861 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, &pVCpu->cpum.GstCtx, (RTGCPTR)pVmxTransient->uExitQual);
6862
6863 Log4Func(("#PF: rc=%Rrc\n", rc));
6864 if (rc == VINF_SUCCESS)
6865 {
6866 /*
6867 * This is typically a shadow page table sync or a MMIO instruction. But we may have
6868 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6869 */
6870 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6871 TRPMResetTrap(pVCpu);
6872 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6873 * This is typically a shadow page table sync or an MMIO instruction. But we may have
6874 }
6875
6876 if (rc == VINF_EM_RAW_GUEST_TRAP)
6877 {
6878 if (!pVmxTransient->fVectoringDoublePF)
6879 {
6880 /* It's a guest page fault and needs to be reflected to the guest. */
6881 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6882 TRPMResetTrap(pVCpu);
6883 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6884 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6885 uGstErrorCode, pVmxTransient->uExitQual);
6886 }
6887 else
6888 {
6889 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6890 TRPMResetTrap(pVCpu);
6891 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6892 vmxHCSetPendingXcptDF(pVCpu);
6893 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6894 }
6895
6896 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6897 return VINF_SUCCESS;
6898 }
6899
6900 TRPMResetTrap(pVCpu);
6901 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6902 return rc;
6903}
6904
6905
6906/**
6907 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6908 *
6909 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6910 */
6911static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6912{
6913 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6914 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6915
6916 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR0>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6917 AssertRCReturn(rc, rc);
6918
6919 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6920 {
6921 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
6922 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6923
6924 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6925 * provides VM-exit instruction length. If this causes problems later,
6926 * disassemble the instruction like it's done on AMD-V. */
6927 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6928 AssertRCReturn(rc2, rc2);
6929 return rc;
6930 }
6931
6932 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6933 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6934 return VINF_SUCCESS;
6935}
6936
6937
6938/**
6939 * VM-exit exception handler for \#BP (Breakpoint exception).
6940 *
6941 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6942 */
6943static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6944{
6945 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6946 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6947
6948 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6949 AssertRCReturn(rc, rc);
6950
6951 VBOXSTRICTRC rcStrict;
6952 if (!pVmxTransient->fIsNestedGuest)
6953 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
6954 else
6955 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6956
6957 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6958 {
6959 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6960 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6961 rcStrict = VINF_SUCCESS;
6962 }
6963
6964 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6965 return rcStrict;
6966}
6967
6968
6969/**
6970 * VM-exit exception handler for \#AC (Alignment-check exception).
6971 *
6972 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6973 */
6974static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6975{
6976 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6977
6978 /*
6979 * Detect #ACs caused by the host having enabled split-lock detection.
6980 * Emulate such instructions.
6981 */
6982#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
6983 int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
6984 AssertRCReturn(rc, rc);
6985 /** @todo detect split lock in cpu feature? */
6986 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6987 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6988 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6989 || CPUMGetGuestCPL(pVCpu) != 3
6990 /* 3. When EFLAGS.AC == 0 this can only be a split-lock case. */
6991 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6992 {
6993 /*
6994 * Check for debug/trace events and import state accordingly.
6995 */
6996 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6997 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6998 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6999#ifndef IN_NEM_DARWIN
7000 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
7001#endif
7002 )
7003 {
7004 if (pVM->cCpus == 1)
7005 {
7006#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7007 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
7008 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7009#else
7010 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7011 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7012#endif
7013 AssertRCReturn(rc, rc);
7014 }
7015 }
7016 else
7017 {
7018 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7019 VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7020 AssertRCReturn(rc, rc);
7021
7022 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
7023
7024 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
7025 {
7026 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
7027 if (rcStrict != VINF_SUCCESS)
7028 return rcStrict;
7029 }
7030 }
7031
7032 /*
7033 * Emulate the instruction.
7034 *
7035 * We have to ignore the LOCK prefix here as we must not retrigger the
7036 * detection on the host. This isn't all that satisfactory, though...
7037 */
7038 if (pVM->cCpus == 1)
7039 {
7040 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
7041 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7042
7043 /** @todo For SMP configs we should do a rendezvous here. */
7044 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
7045 if (rcStrict == VINF_SUCCESS)
7046#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
7047 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
7048 HM_CHANGED_GUEST_RIP
7049 | HM_CHANGED_GUEST_RFLAGS
7050 | HM_CHANGED_GUEST_GPRS_MASK
7051 | HM_CHANGED_GUEST_CS
7052 | HM_CHANGED_GUEST_SS);
7053#else
7054 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7055#endif
7056 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7057 {
7058 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7059 rcStrict = VINF_SUCCESS;
7060 }
7061 return rcStrict;
7062 }
7063 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
7064 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
7065 return VINF_EM_EMULATE_SPLIT_LOCK;
7066 }
7067
7068 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
7069 Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7070 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
7071
7072 /* Re-inject it. We'll detect any nesting before getting here. */
7073 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7074 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7075 return VINF_SUCCESS;
7076}
7077
7078
7079/**
7080 * VM-exit exception handler for \#DB (Debug exception).
7081 *
7082 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7083 */
7084static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7085{
7086 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7087 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
7088
7089 /*
7090 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
7091 */
7092 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
7093
7094 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
7095 uint64_t const uDR6 = X86_DR6_INIT_VAL
7096 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
7097 | X86_DR6_BD | X86_DR6_BS));
7098 Log6Func(("uDR6=%#RX64 uExitQual=%#RX64\n", uDR6, pVmxTransient->uExitQual));
7099
7100 int rc;
7101 if (!pVmxTransient->fIsNestedGuest)
7102 {
7103 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx, uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7104
7105 /*
7106 * Prevents stepping twice over the same instruction when the guest is stepping using
7107 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
7108 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
7109 */
7110 if ( rc == VINF_EM_DBG_STEPPED
7111 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
7112 {
7113 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
7114 rc = VINF_EM_RAW_GUEST_TRAP;
7115 }
7116 }
7117 else
7118 rc = VINF_EM_RAW_GUEST_TRAP;
7119 Log6Func(("rc=%Rrc\n", rc));
7120 if (rc == VINF_EM_RAW_GUEST_TRAP)
7121 {
7122 /*
7123 * The exception was for the guest. Update DR6, DR7.GD and
7124 * IA32_DEBUGCTL.LBR before forwarding it.
7125 * See Intel spec. 27.1 "Architectural State before a VM-Exit"
7126 * and @sdmv3{077,622,17.2.3,Debug Status Register (DR6)}.
7127 */
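        /* Note: ring-3 calls and preemption are kept disabled below so that we cannot be rescheduled
           between checking whether the guest debug state is loaded on this host CPU and writing DR6
           directly to the hardware register. */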
7128#ifndef IN_NEM_DARWIN
7129 VMMRZCallRing3Disable(pVCpu);
7130 HM_DISABLE_PREEMPT(pVCpu);
7131
7132 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
7133 pVCpu->cpum.GstCtx.dr[6] |= uDR6;
7134 if (CPUMIsGuestDebugStateActive(pVCpu))
7135 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
7136
7137 HM_RESTORE_PREEMPT();
7138 VMMRZCallRing3Enable(pVCpu);
7139#else
7140 /** @todo */
7141#endif
7142
7143 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7144 AssertRCReturn(rc, rc);
7145
7146 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
7147 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_GD;
7148
7149 /* Paranoia. */
7150 pVCpu->cpum.GstCtx.dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
7151 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
7152
7153 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pVCpu->cpum.GstCtx.dr[7]);
7154 AssertRC(rc);
7155
7156 /*
7157 * Raise #DB in the guest.
7158 *
7159 * It is important to reflect exactly what the VM-exit gave us (preserving the
7160 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
7161 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
7162 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
7163 *
7164     * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as part of
7165     * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7166 */
7167 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7168 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7169 return VINF_SUCCESS;
7170 }
7171
7172 /*
7173 * Not a guest trap, must be a hypervisor related debug event then.
7174 * Update DR6 in case someone is interested in it.
7175 */
7176 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
7177 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
7178 CPUMSetHyperDR6(pVCpu, uDR6);
7179
7180 return rc;
7181}
7182
7183
7184/**
7185 * Hacks its way around the lovely mesa driver's backdoor accesses.
7186 *
7187 * @sa hmR0SvmHandleMesaDrvGp.
7188 */
7189static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7190{
7191 LogFunc(("cs:rip=%#04x:%08RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
7192 RT_NOREF(pCtx);
7193
7194 /* For now we'll just skip the instruction. */
7195 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7196}
7197
7198
7199/**
7200 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
7201 * backdoor logging w/o checking what it is running inside.
7202 *
7203 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
7204 * backdoor port and magic numbers loaded in registers.
7205 *
7206 * @returns true if it is, false if it isn't.
7207 * @sa hmR0SvmIsMesaDrvGp.
7208 */
7209DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
7210{
7211 /* 0xed: IN eAX,dx */
7212 uint8_t abInstr[1];
7213 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
7214 return false;
7215
7216 /* Check that it is #GP(0). */
7217 if (pVmxTransient->uExitIntErrorCode != 0)
7218 return false;
7219
7220 /* Check magic and port. */
7221 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
7222 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
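    /* 0x564d5868 is the well-known VMware backdoor magic ('VMXh') expected in EAX and 0x5658 ('VX')
       is the backdoor I/O port expected in DX, which is what the mesa driver loads for its logging. */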
7223 if (pCtx->rax != UINT32_C(0x564d5868))
7224 return false;
7225 if (pCtx->dx != UINT32_C(0x5658))
7226 return false;
7227
7228 /* Flat ring-3 CS. */
7229 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
7230 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
7231 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
7232 if (pCtx->cs.Attr.n.u2Dpl != 3)
7233 return false;
7234 if (pCtx->cs.u64Base != 0)
7235 return false;
7236
7237 /* Check opcode. */
7238 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
7239 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
7240 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
7241 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
7242 if (RT_FAILURE(rc))
7243 return false;
7244 if (abInstr[0] != 0xed)
7245 return false;
7246
7247 return true;
7248}
7249
7250
7251/**
7252 * VM-exit exception handler for \#GP (General-protection exception).
7253 *
7254 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7255 */
7256static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7257{
7258 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7259 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
7260
7261 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7262 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7263#ifndef IN_NEM_DARWIN
7264 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7265 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7266 { /* likely */ }
7267 else
7268#endif
7269 {
7270#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7271# ifndef IN_NEM_DARWIN
7272 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7273# else
7274 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
7275# endif
7276#endif
7277 /*
7278 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
7279 * executing a nested-guest, reflect #GP to the guest or nested-guest.
7280 */
7281 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7282 AssertRCReturn(rc, rc);
7283 Log4Func(("Gst: cs:rip=%#04x:%08RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
7284 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
7285
7286 if ( pVmxTransient->fIsNestedGuest
7287 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
7288 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
7289 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7290 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7291 else
7292 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
7293 return rc;
7294 }
7295
7296#ifndef IN_NEM_DARWIN
7297 Assert(CPUMIsGuestInRealModeEx(pCtx));
7298 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
7299 Assert(!pVmxTransient->fIsNestedGuest);
7300
7301 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
7302 AssertRCReturn(rc, rc);
7303
7304 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
7305 if (rcStrict == VINF_SUCCESS)
7306 {
7307 if (!CPUMIsGuestInRealModeEx(pCtx))
7308 {
7309 /*
7310 * The guest is no longer in real-mode, check if we can continue executing the
7311 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
7312 */
7313 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
7314 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
7315 {
7316 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
7317 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7318 }
7319 else
7320 {
7321 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
7322 rcStrict = VINF_EM_RESCHEDULE;
7323 }
7324 }
7325 else
7326 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7327 }
7328 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7329 {
7330 rcStrict = VINF_SUCCESS;
7331 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7332 }
7333 return VBOXSTRICTRC_VAL(rcStrict);
7334#endif
7335}
7336
7337
7338/**
7339 * VM-exit exception handler for \#DE (Divide Error).
7340 *
7341 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7342 */
7343static VBOXSTRICTRC vmxHCExitXcptDE(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7344{
7345 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7346 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE);
7347
7348 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7349 AssertRCReturn(rc, rc);
7350
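    /* GCM (the Guest Compatibility Manager) may be configured to intercept #DE so it can fix up
       division overflows in certain legacy guest software; if it adjusts the guest context we simply
       restart the instruction, otherwise the exception is delivered to the guest further down. */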
7351 VBOXSTRICTRC rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7352 if (VCPU_2_VMXSTATE(pVCpu).fGCMTrapXcptDE)
7353 {
7354 uint8_t cbInstr = 0;
7355 VBOXSTRICTRC rc2 = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7356 if (rc2 == VINF_SUCCESS)
7357 rcStrict = VINF_SUCCESS; /* Restart instruction with modified guest register context. */
7358 else if (rc2 == VERR_NOT_FOUND)
7359 rcStrict = VERR_NOT_FOUND; /* Deliver the exception. */
7360 else
7361 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7362 }
7363 else
7364 rcStrict = VINF_SUCCESS; /* Do nothing. */
7365
7366 /* If the GCM #DE exception handler didn't succeed or wasn't needed, raise #DE. */
7367 if (RT_FAILURE(rcStrict))
7368 {
7369 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7370 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7371 rcStrict = VINF_SUCCESS;
7372 }
7373
7374 Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE);
7375 return VBOXSTRICTRC_VAL(rcStrict);
7376}
7377
7378
7379/**
7380 * VM-exit exception handler wrapper for all other exceptions that are not handled
7381 * by a specific handler.
7382 *
7383 * This simply re-injects the exception back into the VM without any special
7384 * processing.
7385 *
7386 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
7387 */
7388static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7389{
7390 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7391
7392#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7393# ifndef IN_NEM_DARWIN
7394 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7395 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
7396 ("uVector=%#x u32XcptBitmap=%#X32\n",
7397 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
7398 NOREF(pVmcsInfo);
7399# endif
7400#endif
7401
7402 /*
7403 * Re-inject the exception into the guest. This cannot be a double-fault condition which
7404 * would have been handled while checking exits due to event delivery.
7405 */
7406 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7407
7408#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7409 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
7410 AssertRCReturn(rc, rc);
7411 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%08RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7412#endif
7413
7414#ifdef VBOX_WITH_STATISTICS
7415 switch (uVector)
7416 {
7417 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
7418 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
7419 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
7420 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
7421 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
7422 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
7423         case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNM); break;
7424 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
7425 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
7426 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
7427 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
7428 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
7429 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
7430 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
7431 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
7432 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
7433 default:
7434 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
7435 break;
7436 }
7437#endif
7438
7439 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
7440 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
7441 NOREF(uVector);
7442
7443 /* Re-inject the original exception into the guest. */
7444 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
7445 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
7446 return VINF_SUCCESS;
7447}
7448
7449
7450/**
7451 * VM-exit exception handler for all exceptions (except NMIs!).
7452 *
7453 * @remarks This may be called for both guests and nested-guests. Take care to not
7454 * make assumptions and avoid doing anything that is not relevant when
7455 * executing a nested-guest (e.g., Mesa driver hacks).
7456 */
7457static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7458{
7459 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
7460
7461 /*
7462 * If this VM-exit occurred while delivering an event through the guest IDT, take
7463 * action based on the return code and additional hints (e.g. for page-faults)
7464 * that will be updated in the VMX transient structure.
7465 */
7466 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
7467 if (rcStrict == VINF_SUCCESS)
7468 {
7469 /*
7470 * If an exception caused a VM-exit due to delivery of an event, the original
7471 * event may have to be re-injected into the guest. We shall reinject it and
7472 * continue guest execution. However, page-fault is a complicated case and
7473 * needs additional processing done in vmxHCExitXcptPF().
7474 */
7475 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7476 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7477 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
7478 || uVector == X86_XCPT_PF)
7479 {
7480 switch (uVector)
7481 {
7482 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
7483 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
7484 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
7485 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
7486 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
7487 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
7488 case X86_XCPT_DE: return vmxHCExitXcptDE(pVCpu, pVmxTransient);
7489 default:
7490 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
7491 }
7492 }
7493 /* else: inject pending event before resuming guest execution. */
7494 }
7495 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
7496 {
7497 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
7498 rcStrict = VINF_SUCCESS;
7499 }
7500
7501 return rcStrict;
7502}
7503/** @} */
7504
7505
7506/** @name VM-exit handlers.
7507 * @{
7508 */
7509/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7510/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7511/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7512
7513/**
7514 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7515 */
7516HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7517{
7518 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7519 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
7520
7521#ifndef IN_NEM_DARWIN
7522 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
7523 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
7524 return VINF_SUCCESS;
7525 return VINF_EM_RAW_INTERRUPT;
7526#else
7527 return VINF_SUCCESS;
7528#endif
7529}
7530
7531
7532/**
7533 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7534 * VM-exit.
7535 */
7536HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7537{
7538 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7539 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7540
7541 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
7542
7543 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7544 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7545 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7546
7547 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7548 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7549 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7550 NOREF(pVmcsInfo);
7551
7552 VBOXSTRICTRC rcStrict;
7553 switch (uExitIntType)
7554 {
7555#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7556 /*
7557 * Host physical NMIs:
7558 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7559 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7560 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7561 *
7562 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7563 * See Intel spec. 27.5.5 "Updating Non-Register State".
7564 */
7565 case VMX_EXIT_INT_INFO_TYPE_NMI:
7566 {
7567 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7568 break;
7569 }
7570#endif
7571
7572 /*
7573 * Privileged software exceptions (#DB from ICEBP),
7574 * Software exceptions (#BP and #OF),
7575 * Hardware exceptions:
7576 * Process the required exceptions and resume guest execution if possible.
7577 */
7578 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7579 Assert(uVector == X86_XCPT_DB);
7580 RT_FALL_THRU();
7581 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7582 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7583 RT_FALL_THRU();
7584 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7585 {
7586 NOREF(uVector);
7587 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
7588 | HMVMX_READ_EXIT_INSTR_LEN
7589 | HMVMX_READ_IDT_VECTORING_INFO
7590 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
7591 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7592 break;
7593 }
7594
7595 default:
7596 {
7597 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7598 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7599 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7600 break;
7601 }
7602 }
7603
7604 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7605 return rcStrict;
7606}
7607
7608
7609/**
7610 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7611 */
7612HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7613{
7614 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7615
7616 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
7617 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7618 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7619
7620 /* Evaluate and deliver pending events and resume guest execution. */
7621 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7622 return VINF_SUCCESS;
7623}
7624
7625
7626/**
7627 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7628 */
7629HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7630{
7631 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7632
7633 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7634 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7635 {
7636 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7637 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7638 }
7639
7640 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
7641
7642 /*
7643 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7644 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7645 */
7646 uint32_t fIntrState;
7647 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7648 AssertRC(rc);
7649 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7650 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7651 {
7652 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
7653
7654 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7655 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7656 AssertRC(rc);
7657 }
7658
7659 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
7660 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7661
7662 /* Evaluate and deliver pending events and resume guest execution. */
7663 return VINF_SUCCESS;
7664}
7665
7666
7667/**
7668 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7669 */
7670HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7671{
7672 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7673 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7674}
7675
7676
7677/**
7678 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7679 */
7680HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7681{
7682 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7683 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7684}
7685
7686
7687/**
7688 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7689 */
7690HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7691{
7692 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7693
7694 /*
7695 * Get the state we need and update the exit history entry.
7696 */
7697 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7698 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7699 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7700 AssertRCReturn(rc, rc);
7701
7702 VBOXSTRICTRC rcStrict;
7703 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7704 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7705 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7706 if (!pExitRec)
7707 {
7708 /*
7709 * Regular CPUID instruction execution.
7710 */
7711 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7712 if (rcStrict == VINF_SUCCESS)
7713 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7714 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7715 {
7716 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7717 rcStrict = VINF_SUCCESS;
7718 }
7719 }
7720 else
7721 {
7722 /*
7723 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7724 */
7725 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
7726 IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7727 AssertRCReturn(rc2, rc2);
7728
7729 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7730 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7731
7732 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7733 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7734
7735 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7736 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7737 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7738 }
7739 return rcStrict;
7740}
7741
7742
7743/**
7744 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7745 */
7746HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7747{
7748 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7749
7750 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7751 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7752 AssertRCReturn(rc, rc);
7753
7754 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7755 return VINF_EM_RAW_EMULATE_INSTR;
7756
7757 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7758 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7759}
7760
7761
7762/**
7763 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7764 */
7765HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7766{
7767 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7768
7769 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7770 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7771 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7772 AssertRCReturn(rc, rc);
7773
7774 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7775 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7776 {
7777 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7778 we must reset offsetting on VM-entry. See @bugref{6634}. */
7779 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7780 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7781 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7782 }
7783 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7784 {
7785 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7786 rcStrict = VINF_SUCCESS;
7787 }
7788 return rcStrict;
7789}
7790
7791
7792/**
7793 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7794 */
7795HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7796{
7797 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7798
7799 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7800 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7801 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX>(pVCpu, pVmcsInfo, __FUNCTION__);
7802 AssertRCReturn(rc, rc);
7803
7804 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7805 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7806 {
7807 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7808 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7809 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7810 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7811 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7812 }
7813 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7814 {
7815 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7816 rcStrict = VINF_SUCCESS;
7817 }
7818 return rcStrict;
7819}
7820
7821
7822/**
7823 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7824 */
7825HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7826{
7827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7828
7829 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7830 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
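    /* CR4 is included in the import mask below because RDPMC raises #GP(0) when CR4.PCE is clear
       and CPL != 0, which the instruction emulation has to check. */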
7831 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
7832 AssertRCReturn(rc, rc);
7833
7834 VBOXSTRICTRC rcStrict = IEMExecDecodedRdpmc(pVCpu, pVmxTransient->cbExitInstr);
7835 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7837 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7838 {
7839 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7840 rcStrict = VINF_SUCCESS;
7841 }
7842 return rcStrict;
7843}
7844
7845
7846/**
7847 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7848 */
7849HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7850{
7851 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7852
7853 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7854 if (EMAreHypercallInstructionsEnabled(pVCpu))
7855 {
7856 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7857 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RIP
7858 | CPUMCTX_EXTRN_RFLAGS
7859 | CPUMCTX_EXTRN_CR0
7860 | CPUMCTX_EXTRN_SS
7861 | CPUMCTX_EXTRN_CS
7862 | CPUMCTX_EXTRN_EFER>(pVCpu, pVmcsInfo, __FUNCTION__);
7863 AssertRCReturn(rc, rc);
7864
7865 /* Perform the hypercall. */
7866 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7867 if (rcStrict == VINF_SUCCESS)
7868 {
7869 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7870 AssertRCReturn(rc, rc);
7871 }
7872 else
7873 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7874 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7875 || RT_FAILURE(rcStrict));
7876
7877 /* If the hypercall changes anything other than guest's general-purpose registers,
7878 we would need to reload the guest changed bits here before VM-entry. */
7879 }
7880 else
7881 Log4Func(("Hypercalls not enabled\n"));
7882
7883 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7884 if (RT_FAILURE(rcStrict))
7885 {
7886 vmxHCSetPendingXcptUD(pVCpu);
7887 rcStrict = VINF_SUCCESS;
7888 }
7889
7890 return rcStrict;
7891}
7892
7893
7894/**
7895 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7896 */
7897HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7898{
7899 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7900#ifndef IN_NEM_DARWIN
7901 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7902#endif
7903
7904 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7905 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
7906 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7907 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7908 AssertRCReturn(rc, rc);
7909
7910 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7911
7912 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7913 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7914 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7915 {
7916 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7917 rcStrict = VINF_SUCCESS;
7918 }
7919 else
7920 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7921 VBOXSTRICTRC_VAL(rcStrict)));
7922 return rcStrict;
7923}
7924
7925
7926/**
7927 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7928 */
7929HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7930{
7931 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7932
7933 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7934 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
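    /* DS is included below because MONITOR takes its linear address from DS:RAX/EAX by default
       (segment overrides, if any, are handled by the instruction emulation). */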
7935 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS>(pVCpu, pVmcsInfo, __FUNCTION__);
7936 AssertRCReturn(rc, rc);
7937
7938 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7939 if (rcStrict == VINF_SUCCESS)
7940 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7941 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7942 {
7943 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7944 rcStrict = VINF_SUCCESS;
7945 }
7946
7947 return rcStrict;
7948}
7949
7950
7951/**
7952 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7953 */
7954HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7955{
7956 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7957
7958 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7959 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
7960 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
7961 AssertRCReturn(rc, rc);
7962
7963 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7964 if (RT_SUCCESS(rcStrict))
7965 {
7966 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
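        /* The emulation typically returns VINF_EM_HALT here; only keep running the guest (VINF_SUCCESS)
           if EM determines that the monitored wait should not put the vCPU to sleep after all. */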
7967 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7968 rcStrict = VINF_SUCCESS;
7969 }
7970
7971 return rcStrict;
7972}
7973
7974
7975/**
7976 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7977 * VM-exit.
7978 */
7979HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7980{
7981 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7982 return VINF_EM_RESET;
7983}
7984
7985
7986/**
7987 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7988 */
7989HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7990{
7991 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7992
7993 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7994 AssertRCReturn(rc, rc);
7995
7996 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7997 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7998 rc = VINF_SUCCESS;
7999 else
8000 rc = VINF_EM_HALT;
8001
8002 if (rc != VINF_SUCCESS)
8003 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
8004 return rc;
8005}
8006
8007
8008#ifndef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8009/**
8010 * VM-exit handler for instructions that result in a \#UD exception delivered to
8011 * the guest.
8012 */
8013HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8014{
8015 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8016 vmxHCSetPendingXcptUD(pVCpu);
8017 return VINF_SUCCESS;
8018}
8019#endif
8020
8021
8022/**
8023 * VM-exit handler for expiry of the VMX-preemption timer.
8024 */
8025HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8026{
8027 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8028
8029 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
8030 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8031    Log12(("vmxHCExitPreemptTimer:\n"));
8032
8033 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
8034 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8035 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
8036 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
8037 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
8038}
8039
8040
8041/**
8042 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
8043 */
8044HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8045{
8046 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8047
8048 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8049 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8050 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4>(pVCpu, pVmcsInfo, __FUNCTION__);
8051 AssertRCReturn(rc, rc);
8052
8053 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
8054 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8055 : HM_CHANGED_RAISED_XCPT_MASK);
8056
8057#ifndef IN_NEM_DARWIN
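    /* The guest may just have changed XCR0: re-check whether the guest value now differs from the
       host value and, if the answer changed, update the start-VM function selection so XCR0 is
       saved/restored (or not) on subsequent VM-entries. */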
8058 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8059 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
8060 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
8061 {
8062 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
8063 hmR0VmxUpdateStartVmFunction(pVCpu);
8064 }
8065#endif
8066
8067 return rcStrict;
8068}
8069
8070
8071/**
8072 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
8073 */
8074HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8075{
8076 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8077
8078     /** @todo Enable the new code after finding a reliable guest test-case. */
8079#if 1
8080 return VERR_EM_INTERPRETER;
8081#else
8082 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8083 | HMVMX_READ_EXIT_INSTR_INFO
8084 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8085 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8086 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8087 AssertRCReturn(rc, rc);
8088
8089 /* Paranoia. Ensure this has a memory operand. */
8090 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
8091
8092 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
8093 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
8094 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
8095 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
8096
8097 RTGCPTR GCPtrDesc;
8098 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
8099
8100 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
8101 GCPtrDesc, uType);
8102 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8103 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8104 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8105 {
8106 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8107 rcStrict = VINF_SUCCESS;
8108 }
8109 return rcStrict;
8110#endif
8111}
8112
8113
8114/**
8115 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
8116 * VM-exit.
8117 */
8118HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8119{
8120 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8121 int rc = vmxHCImportGuestStateEx(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8122 AssertRCReturn(rc, rc);
8123
8124 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
8125 if (RT_FAILURE(rc))
8126 return rc;
8127
8128 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
8129 NOREF(uInvalidReason);
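    /* uInvalidReason is a VMX_IGS_XXX style diagnostic value; it is only consumed by the strict-build
       logging below, hence the NOREF above for non-strict builds. */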
8130
8131#ifdef VBOX_STRICT
8132 uint32_t fIntrState;
8133 uint64_t u64Val;
8134 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_INFO
8135 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8136 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
8137
8138 Log4(("uInvalidReason %u\n", uInvalidReason));
8139 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
8140 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
8141 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
8142
8143 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
8144 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
8145 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
8146 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
8147 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
8148 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
8149 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
8150     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW                 %#RX64\n", u64Val));
8151 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
8152 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
8153 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
8154 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
8155# ifndef IN_NEM_DARWIN
8156 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
8157 {
8158 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
8159 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
8160 }
8161
8162 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
8163# endif
8164#endif
8165
8166 return VERR_VMX_INVALID_GUEST_STATE;
8167}
8168
8169/**
8170 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
8171 */
8172HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8173{
8174 /*
8175 * Cumulative notes of all recognized but unexpected VM-exits.
8176 *
8177 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
8178 * nested-paging is used.
8179 *
8180 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
8181     *    2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
8182 * this function (and thereby stop VM execution) for handling such instructions.
8183 *
8184 *
8185 * VMX_EXIT_INIT_SIGNAL:
8186 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8187 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
8188     *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
8189 *
8190     *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
8191 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
8192 * See Intel spec. "23.8 Restrictions on VMX operation".
8193 *
8194 * VMX_EXIT_SIPI:
8195 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
8196 * activity state is used. We don't make use of it as our guests don't have direct
8197 * access to the host local APIC.
8198 *
8199 * See Intel spec. 25.3 "Other Causes of VM-exits".
8200 *
8201 * VMX_EXIT_IO_SMI:
8202 * VMX_EXIT_SMI:
8203 * This can only happen if we support dual-monitor treatment of SMI, which can be
8204 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
8205 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
8206 * VMX root mode or receive an SMI. If we get here, something funny is going on.
8207 *
8208 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
8209 * See Intel spec. 25.3 "Other Causes of VM-Exits"
8210 *
8211 * VMX_EXIT_ERR_MSR_LOAD:
8212     *    Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
8213     *    and typically indicate a bug in the hypervisor code. We thus cannot resume
8214     *    execution.
8215 *
8216 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
8217 *
8218 * VMX_EXIT_ERR_MACHINE_CHECK:
8219     *    Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
8220     *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
8221     *    abort-class #MC exception is raised. We thus cannot assume a
8222     *    reasonable chance of continuing any sort of execution and we bail.
8223 *
8224 * See Intel spec. 15.1 "Machine-check Architecture".
8225 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
8226 *
8227 * VMX_EXIT_PML_FULL:
8228 * VMX_EXIT_VIRTUALIZED_EOI:
8229 * VMX_EXIT_APIC_WRITE:
8230 * We do not currently support any of these features and thus they are all unexpected
8231 * VM-exits.
8232 *
8233 * VMX_EXIT_GDTR_IDTR_ACCESS:
8234 * VMX_EXIT_LDTR_TR_ACCESS:
8235 * VMX_EXIT_RDRAND:
8236 * VMX_EXIT_RSM:
8237 * VMX_EXIT_VMFUNC:
8238 * VMX_EXIT_ENCLS:
8239 * VMX_EXIT_RDSEED:
8240 * VMX_EXIT_XSAVES:
8241 * VMX_EXIT_XRSTORS:
8242 * VMX_EXIT_UMWAIT:
8243 * VMX_EXIT_TPAUSE:
8244 * VMX_EXIT_LOADIWKEY:
8245 * These VM-exits are -not- caused unconditionally by execution of the corresponding
8246     *    instruction. Any VM-exit for these instructions indicates a hardware problem,
8247 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
8248 *
8249 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
8250 */
8251 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8252 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
8253 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8254}
8255
8256
8257/**
8258 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
8259 */
8260HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8261{
8262 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8263
8264 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8265
8266 /** @todo Optimize this: We currently drag in the whole MSR state
8267 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8268 * MSRs required. That would require changes to IEM and possibly CPUM too.
8269      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8270 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8271 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8272 int rc;
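    /* Note: reads of MSR_K8_FS_BASE / MSR_K8_GS_BASE are serviced from the segment register state
       in CPUMCTX, which is why the corresponding segment register is imported in addition to the
       MSR mask in the cases below. */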
8273 switch (idMsr)
8274 {
8275 default:
8276 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8277 __FUNCTION__);
8278 AssertRCReturn(rc, rc);
8279 break;
8280 case MSR_K8_FS_BASE:
8281 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8282 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8283 AssertRCReturn(rc, rc);
8284 break;
8285 case MSR_K8_GS_BASE:
8286 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8287 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8288 AssertRCReturn(rc, rc);
8289 break;
8290 }
8291
8292 Log4Func(("ecx=%#RX32\n", idMsr));
8293
8294#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8295 Assert(!pVmxTransient->fIsNestedGuest);
8296 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8297 {
8298 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
8299 && idMsr != MSR_K6_EFER)
8300 {
8301 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
8302 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8303 }
8304 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8305 {
8306 Assert(pVmcsInfo->pvMsrBitmap);
8307 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8308 if (fMsrpm & VMXMSRPM_ALLOW_RD)
8309 {
8310 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
8311 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8312 }
8313 }
8314 }
8315#endif
8316
8317 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
8318 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
8319 if (rcStrict == VINF_SUCCESS)
8320 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8321 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8322 {
8323 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8324 rcStrict = VINF_SUCCESS;
8325 }
8326 else
8327 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
8328 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8329
8330 return rcStrict;
8331}
8332
8333
8334/**
8335 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
8336 */
8337HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8338{
8339 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8340
8341 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8342
8343 /*
8344      * The FS and GS base MSRs are not part of the all-MSRs mask used below.
8345      * Although we don't need to fetch the base itself (it will be overwritten shortly),
8346      * loading the guest state later writes back the entire segment register, including the
8347      * limit and attributes, so we need to import the full FS/GS segment registers here.
8348 */
8349 /** @todo Optimize this: We currently drag in the whole MSR state
8350 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
8351 * MSRs required. That would require changes to IEM and possibly CPUM too.
8352      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
8353 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8354 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
8355 int rc;
8356 switch (idMsr)
8357 {
8358 default:
8359 rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS>(pVCpu, pVmcsInfo,
8360 __FUNCTION__);
8361 AssertRCReturn(rc, rc);
8362 break;
8363
8364 case MSR_K8_FS_BASE:
8365 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8366 | CPUMCTX_EXTRN_FS>(pVCpu, pVmcsInfo, __FUNCTION__);
8367 AssertRCReturn(rc, rc);
8368 break;
8369 case MSR_K8_GS_BASE:
8370 rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS
8371 | CPUMCTX_EXTRN_GS>(pVCpu, pVmcsInfo, __FUNCTION__);
8372 AssertRCReturn(rc, rc);
8373 break;
8374 }
8375 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
8376
8377 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
8378 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
8379
8380 if (rcStrict == VINF_SUCCESS)
8381 {
8382 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
8383
8384 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
8385 if ( idMsr == MSR_IA32_APICBASE
8386 || ( idMsr >= MSR_IA32_X2APIC_START
8387 && idMsr <= MSR_IA32_X2APIC_END))
8388 {
8389 /*
8390 * We've already saved the APIC related guest-state (TPR) in post-run phase.
8391 * When full APIC register virtualization is implemented we'll have to make
8392 * sure APIC state is saved from the VMCS before IEM changes it.
8393 */
8394 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8395 }
8396 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
8397 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8398 else if (idMsr == MSR_K6_EFER)
8399 {
8400 /*
8401 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
8402 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
8403 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
8404 */
8405 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
8406 }
8407
8408 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
8409 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
8410 {
8411 switch (idMsr)
8412 {
8413 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
8414 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
8415 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
8416 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
8417 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
8418 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
8419 default:
8420 {
8421#ifndef IN_NEM_DARWIN
8422 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8423 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
8424 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8425 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
8426#else
8427 AssertMsgFailed(("TODO\n"));
8428#endif
8429 break;
8430 }
8431 }
8432 }
8433#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
8434 else
8435 {
8436 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
8437 switch (idMsr)
8438 {
8439 case MSR_IA32_SYSENTER_CS:
8440 case MSR_IA32_SYSENTER_EIP:
8441 case MSR_IA32_SYSENTER_ESP:
8442 case MSR_K8_FS_BASE:
8443 case MSR_K8_GS_BASE:
8444 {
8445 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
8446 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8447 }
8448
8449 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
8450 default:
8451 {
8452 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
8453 {
8454 /* EFER MSR writes are always intercepted. */
8455 if (idMsr != MSR_K6_EFER)
8456 {
8457 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
8458 idMsr));
8459 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8460 }
8461 }
8462
8463 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
8464 {
8465 Assert(pVmcsInfo->pvMsrBitmap);
8466 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
8467 if (fMsrpm & VMXMSRPM_ALLOW_WR)
8468 {
8469 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
8470 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
8471 }
8472 }
8473 break;
8474 }
8475 }
8476 }
8477#endif /* VBOX_STRICT */
8478 }
8479 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8480 {
8481 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8482 rcStrict = VINF_SUCCESS;
8483 }
8484 else
8485 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
8486 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
8487
8488 return rcStrict;
8489}
8490
8491
8492/**
8493 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
8494 */
8495HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8496{
8497 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8498
8499 /** @todo The guest has likely hit a contended spinlock. We might want to
8500 * poke and schedule a different guest VCPU. */
8501 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8502 if (RT_SUCCESS(rc))
8503 return VINF_EM_RAW_INTERRUPT;
8504
8505 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
8506 return rc;
8507}
8508
8509
8510/**
8511 * VM-exit handler for when the TPR value is lowered below the specified
8512 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
8513 */
8514HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8515{
8516 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8517 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
8518
8519 /*
8520 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
8521 * We'll re-evaluate pending interrupts and inject them before the next VM
8522 * entry so we can just continue execution here.
8523 */
8524 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
8525 return VINF_SUCCESS;
8526}
8527
8528
8529/**
8530 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
8531 * VM-exit.
8532 *
8533 * @retval VINF_SUCCESS when guest execution can continue.
8534 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
8535 * @retval VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
8536 * incompatible guest state for VMX execution (real-on-v86 case).
8537 */
8538HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8539{
8540 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8541 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8542
8543 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8544 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8545 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8546
8547 VBOXSTRICTRC rcStrict;
8548 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8549 uint64_t const uExitQual = pVmxTransient->uExitQual;
8550 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8551 switch (uAccessType)
8552 {
8553 /*
8554 * MOV to CRx.
8555 */
8556 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8557 {
8558 /*
8559 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8560 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8561 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8562 * PAE PDPTEs as well.
8563 */
8564 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
8565 AssertRCReturn(rc, rc);
8566
8567 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8568#ifndef IN_NEM_DARWIN
8569 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8570#endif
8571 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8572 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8573
8574 /*
8575 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8576 * - When nested paging isn't used.
8577 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8578 * - We are executing in the VM debug loop.
8579 */
8580#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8581# ifndef IN_NEM_DARWIN
8582 Assert( iCrReg != 3
8583 || !VM_IS_VMX_NESTED_PAGING(pVM)
8584 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8585 || pVCpu->hmr0.s.fUsingDebugLoop);
8586# else
8587 Assert( iCrReg != 3
8588 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8589# endif
8590#endif
8591
8592 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8593 Assert( iCrReg != 8
8594 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8595
8596 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8597 AssertMsg( rcStrict == VINF_SUCCESS
8598 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8599
8600#ifndef IN_NEM_DARWIN
8601 /*
8602 * This is a kludge for handling switches back to real mode when we try to use
8603 * V86 mode to run real mode code directly. The problem is that V86 mode cannot
8604 * deal with special selector values, so we have to return to ring-3 and run
8605 * there until the selector values are V86-mode compatible.
8606 *
8607 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8608 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8609 * this function.
8610 */
8611 if ( iCrReg == 0
8612 && rcStrict == VINF_SUCCESS
8613 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8614 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8615 && (uOldCr0 & X86_CR0_PE)
8616 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8617 {
8618 /** @todo Check selectors rather than returning all the time. */
8619 Assert(!pVmxTransient->fIsNestedGuest);
8620 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8621 rcStrict = VINF_EM_RESCHEDULE_REM;
8622 }
8623#endif
8624
8625 break;
8626 }
8627
8628 /*
8629 * MOV from CRx.
8630 */
8631 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8632 {
8633 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8634 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8635
8636 /*
8637 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8638 * - When nested paging isn't used.
8639 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8640 * - We are executing in the VM debug loop.
8641 */
8642#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8643# ifndef IN_NEM_DARWIN
8644 Assert( iCrReg != 3
8645 || !VM_IS_VMX_NESTED_PAGING(pVM)
8646 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8647 || pVCpu->hmr0.s.fLeaveDone);
8648# else
8649 Assert( iCrReg != 3
8650 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8651# endif
8652#endif
8653
8654 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8655 Assert( iCrReg != 8
8656 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8657
8658 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8659 break;
8660 }
8661
8662 /*
8663 * CLTS (Clear Task-Switch Flag in CR0).
8664 */
8665 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8666 {
8667 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8668 break;
8669 }
8670
8671 /*
8672 * LMSW (Load Machine-Status Word into CR0).
8673 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8674 */
8675 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8676 {
8677 RTGCPTR GCPtrEffDst;
8678 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8679 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8680 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8681 if (fMemOperand)
8682 {
8683 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
8684 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8685 }
8686 else
8687 GCPtrEffDst = NIL_RTGCPTR;
8688 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8689 break;
8690 }
8691
8692 default:
8693 {
8694 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8695 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8696 }
8697 }
8698
8699 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8700 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8701 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8702
8703 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8704 NOREF(pVM);
8705 return rcStrict;
8706}
8707
8708
8709/**
8710 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8711 * VM-exit.
8712 */
8713HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8714{
8715 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8716 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8717
8718 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8719 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8720 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
8721 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8722#define VMX_HC_EXIT_IO_INSTR_INITIAL_REGS (IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER)
8723 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8724 int rc = vmxHCImportGuestState<VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8725 AssertRCReturn(rc, rc);
8726
8727 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8728 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8729 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8730 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8731 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8732 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8733 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8734 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8735
8736 /*
8737 * Update exit history to see if this exit can be optimized.
8738 */
8739 VBOXSTRICTRC rcStrict;
8740 PCEMEXITREC pExitRec = NULL;
8741 if ( !fGstStepping
8742 && !fDbgStepping)
8743 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8744 !fIOString
8745 ? !fIOWrite
8746 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8747 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8748 : !fIOWrite
8749 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8750 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8751 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8752 if (!pExitRec)
8753 {
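        /* The exit-qualification I/O size field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes (2 is
           undefined, see the AssertReturn above), so uIOSize indexes these tables directly; e.g. an
           'IN AL, DX' exit has uIOSize=0, giving cbValue=1 and uAndVal=0xff. */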
8754 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8755 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8756
8757 uint32_t const cbValue = s_aIOSizes[uIOSize];
8758 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8759 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8760 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8761 if (fIOString)
8762 {
8763 /*
8764 * INS/OUTS - I/O String instruction.
8765 *
8766 * Use instruction-information if available, otherwise fall back on
8767 * interpreting the instruction.
8768 */
8769 Log4Func(("cs:rip=%#04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8770 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
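            /* The IA32_VMX_BASIC MSR advertises whether the CPU provides INS/OUTS instruction
               information in the VMCS for these exits; prefer that over re-decoding the instruction. */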
8771 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8772 if (fInsOutsInfo)
8773 {
8774 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
8775 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8776 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8777 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8778 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8779 if (fIOWrite)
8780 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8781 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8782 else
8783 {
8784 /*
8785 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8786 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8787 * See Intel Instruction spec. for "INS".
8788 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8789 */
8790 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8791 }
8792 }
8793 else
8794 rcStrict = IEMExecOne(pVCpu);
8795
8796 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8797 fUpdateRipAlready = true;
8798 }
8799 else
8800 {
8801 /*
8802 * IN/OUT - I/O instruction.
8803 */
8804 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8805 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8806 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8807 if (fIOWrite)
8808 {
8809 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8810 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8811#ifndef IN_NEM_DARWIN
8812 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8813 && !pCtx->eflags.Bits.u1TF)
8814 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8815#endif
8816 }
8817 else
8818 {
8819 uint32_t u32Result = 0;
8820 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8821 if (IOM_SUCCESS(rcStrict))
8822 {
8823 /* Save result of I/O IN instr. in AL/AX/EAX. */
8824 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8825 }
8826#ifndef IN_NEM_DARWIN
8827 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8828 && !pCtx->eflags.Bits.u1TF)
8829 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8830#endif
8831 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8832 }
8833 }
8834
8835 if (IOM_SUCCESS(rcStrict))
8836 {
8837 if (!fUpdateRipAlready)
8838 {
8839 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8841 }
8842
8843 /*
8844 * INS/OUTS with a REP prefix updates RFLAGS; getting this wrong has been observed as a
8845 * triple-fault guru meditation while booting a Fedora 17 64-bit guest.
8846 *
8847 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8848 */
8849 if (fIOString)
8850 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8851
8852 /*
8853 * If any I/O breakpoints are armed, we need to check if one triggered
8854 * and take appropriate action.
8855 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8856 */
8857#if 1
8858 AssertCompile(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7);
8859#else
8860 AssertCompile(!(VMX_HC_EXIT_IO_INSTR_INITIAL_REGS & CPUMCTX_EXTRN_DR7));
8861 rc = vmxHCImportGuestState<CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo);
8862 AssertRCReturn(rc, rc);
8863#endif
8864
8865 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8866 * execution engines about whether hyper BPs and such are pending. */
8867 uint32_t const uDr7 = pCtx->dr[7];
8868 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8869 && X86_DR7_ANY_RW_IO(uDr7)
8870 && (pCtx->cr4 & X86_CR4_DE))
8871 || DBGFBpIsHwIoArmed(pVM)))
8872 {
8873 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8874
8875#ifndef IN_NEM_DARWIN
8876 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8877 VMMRZCallRing3Disable(pVCpu);
8878 HM_DISABLE_PREEMPT(pVCpu);
8879
8880 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8881
8882 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8883 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8884 {
8885 /* Raise #DB. */
8886 if (fIsGuestDbgActive)
8887 ASMSetDR6(pCtx->dr[6]);
8888 if (pCtx->dr[7] != uDr7)
8889 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8890
8891 vmxHCSetPendingXcptDB(pVCpu);
8892 }
8893 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8894 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8895 else if ( rcStrict2 != VINF_SUCCESS
8896 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8897 rcStrict = rcStrict2;
8898 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8899
8900 HM_RESTORE_PREEMPT();
8901 VMMRZCallRing3Enable(pVCpu);
8902#else
8903 /** @todo */
8904#endif
8905 }
8906 }
8907
8908#ifdef VBOX_STRICT
8909 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8910 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8911 Assert(!fIOWrite);
8912 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8913 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8914 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8915 Assert(fIOWrite);
8916 else
8917 {
8918# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8919 * statuses that the VMM device and some others may return. See
8920 * IOM_SUCCESS() for guidance. */
8921 AssertMsg( RT_FAILURE(rcStrict)
8922 || rcStrict == VINF_SUCCESS
8923 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8924 || rcStrict == VINF_EM_DBG_BREAKPOINT
8925 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8926 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8927# endif
8928 }
8929#endif
8930 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8931 }
8932 else
8933 {
8934 /*
8935 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8936 */
8937 int rc2 = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
8938 VMX_HC_EXIT_IO_INSTR_INITIAL_REGS>(pVCpu, pVmcsInfo, __FUNCTION__);
8939 AssertRCReturn(rc2, rc2);
8940 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8941 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8942 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8943 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8944 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8945 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8946
8947 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8948 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8949
8950 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8951 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8952 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8953 }
8954 return rcStrict;
8955}
8956
8957
8958/**
8959 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8960 * VM-exit.
8961 */
8962HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8963{
8964 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8965
8966 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8967 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
8968 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8969 {
8970 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_INFO>(pVCpu, pVmxTransient);
8971 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8972 {
8973 uint32_t uErrCode;
8974 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8975 {
8976 vmxHCReadToTransient<HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
8977 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8978 }
8979 else
8980 uErrCode = 0;
8981
8982 RTGCUINTPTR GCPtrFaultAddress;
8983 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8984 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8985 else
8986 GCPtrFaultAddress = 0;
8987
8988 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
8989
8990 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8991 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8992
8993 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8994 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8995 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8996 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8997 }
8998 }
8999
9000 /* Fall back to the interpreter to emulate the task-switch. */
9001 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
9002 return VERR_EM_INTERPRETER;
9003}
9004
9005
9006/**
9007 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9008 */
9009HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9010{
9011 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9012
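    /* The monitor trap flag fires after (at most) one guest instruction, so clear the control
       again and report the completed step to the debugger. */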
9013 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9014 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
9015 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9016 AssertRC(rc);
9017 return VINF_EM_DBG_STEPPED;
9018}
9019
9020
9021/**
9022 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9023 */
9024HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9025{
9026 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9027 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
9028
9029 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9030 | HMVMX_READ_EXIT_INSTR_LEN
9031 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9032 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9033 | HMVMX_READ_IDT_VECTORING_INFO
9034 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9035
9036 /*
9037 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9038 */
9039 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9040 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9041 {
9042 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
9043 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
9044 {
9045 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9046 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9047 }
9048 }
9049 else
9050 {
9051 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9052 return rcStrict;
9053 }
9054
9055 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
9056 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9057 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9058 AssertRCReturn(rc, rc);
9059
9060 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
9061 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
9062 switch (uAccessType)
9063 {
9064#ifndef IN_NEM_DARWIN
9065 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9066 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9067 {
9068 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
9069 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
9070 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9071
9072 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
9073 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9074 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
9075 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9076 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
9077
9078 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
9079 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
9080 Log4Func(("IOMR0MmioPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9081 if ( rcStrict == VINF_SUCCESS
9082 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9083 || rcStrict == VERR_PAGE_NOT_PRESENT)
9084 {
9085 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9086 | HM_CHANGED_GUEST_APIC_TPR);
9087 rcStrict = VINF_SUCCESS;
9088 }
9089 break;
9090 }
9091#else
9092 /** @todo */
9093#endif
9094
9095 default:
9096 {
9097 Log4Func(("uAccessType=%#x\n", uAccessType));
9098 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9099 break;
9100 }
9101 }
9102
9103 if (rcStrict != VINF_SUCCESS)
9104 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
9105 return rcStrict;
9106}
9107
9108
9109/**
9110 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9111 * VM-exit.
9112 */
9113HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9114{
9115 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9116 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9117
9118 /*
9119 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
9120 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
9121 * must emulate the MOV DRx access.
9122 */
9123 if (!pVmxTransient->fIsNestedGuest)
9124 {
9125 /* We should -not- get this VM-exit if the guest's debug registers were active. */
9126 if ( pVmxTransient->fWasGuestDebugStateActive
9127#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9128 && !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx
9129#endif
9130 )
9131 {
9132 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
9133 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
9134 }
9135
9136 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
9137 && !pVmxTransient->fWasHyperDebugStateActive)
9138 {
9139 Assert(!DBGFIsStepping(pVCpu));
9140 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
9141
9142 /* Whether we disable intercepting MOV DRx instructions and resume
9143 the current one, or emulate it and keep intercepting them is
9144 configurable. Though it usually comes down to whether there are
9145 any new DR6 & DR7 bits (RTM) we want to hide from the guest. */
9146#ifdef VMX_WITH_MAYBE_ALWAYS_INTERCEPT_MOV_DRX
9147 bool const fResumeInstruction = !pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fAlwaysInterceptMovDRx;
9148#else
9149 bool const fResumeInstruction = true;
9150#endif
9151 if (fResumeInstruction)
9152 {
9153 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
9154 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
9155 AssertRC(rc);
9156 }
9157
9158#ifndef IN_NEM_DARWIN
9159 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
9160 VMMRZCallRing3Disable(pVCpu);
9161 HM_DISABLE_PREEMPT(pVCpu);
9162
9163 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9164 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9165 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9166
9167 HM_RESTORE_PREEMPT();
9168 VMMRZCallRing3Enable(pVCpu);
9169#else
9170 CPUMR3NemActivateGuestDebugState(pVCpu);
9171 Assert(CPUMIsGuestDebugStateActive(pVCpu));
9172 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
9173#endif
9174
9175 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
9176 if (fResumeInstruction)
9177 {
9178#ifdef VBOX_WITH_STATISTICS
9179 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9180 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9181 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9182 else
9183 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9184#endif
9185 return VINF_SUCCESS;
9186 }
9187 }
9188 }
9189
9190 /*
9191 * Import state. We must have DR7 loaded here as it's always consulted,
9192 * both for reading and writing. The other debug registers are never
9193 * exported as such.
9194 */
9195 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9196 int rc = vmxHCImportGuestState< IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
9197 | CPUMCTX_EXTRN_GPRS_MASK
9198 | CPUMCTX_EXTRN_DR7>(pVCpu, pVmcsInfo, __FUNCTION__);
9199 AssertRCReturn(rc, rc);
9200
9201 uint8_t const iGReg = VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual);
9202 uint8_t const iDrReg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
9203 Log4Func(("cs:rip=%#04x:%08RX64 r%d %s dr%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iGReg,
9204 VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE ? "->" : "<-", iDrReg));
9205
9206 VBOXSTRICTRC rcStrict;
9207 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
9208 {
9209 /*
9210 * Write DRx register.
9211 */
9212 rcStrict = IEMExecDecodedMovDRxWrite(pVCpu, pVmxTransient->cbExitInstr, iDrReg, iGReg);
9213 AssertMsg( rcStrict == VINF_SUCCESS
9214 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9215
9216 if (rcStrict == VINF_SUCCESS)
9217 {
9218 /** @todo r=bird: Not sure why we always flag DR7 as modified here, but I've
9219 * kept it for now to avoid breaking something non-obvious. */
9220 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9221 | HM_CHANGED_GUEST_DR7);
9222 /* Update the DR6 register if guest debug state is active, otherwise we'll
9223 trash it when calling CPUMR0DebugStateMaybeSaveGuestAndRestoreHost. */
9224 if (iDrReg == 6 && CPUMIsGuestDebugStateActive(pVCpu))
9225 ASMSetDR6(pVCpu->cpum.GstCtx.dr[6]);
9226 Log4Func(("r%d=%#RX64 => dr%d=%#RX64\n", iGReg, pVCpu->cpum.GstCtx.aGRegs[iGReg].u,
9227 iDrReg, pVCpu->cpum.GstCtx.dr[iDrReg]));
9228 }
9229 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9230 {
9231 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9232 rcStrict = VINF_SUCCESS;
9233 }
9234
9235 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
9236 }
9237 else
9238 {
9239 /*
9240 * Read DRx register into a general purpose register.
9241 */
9242 rcStrict = IEMExecDecodedMovDRxRead(pVCpu, pVmxTransient->cbExitInstr, iGReg, iDrReg);
9243 AssertMsg( rcStrict == VINF_SUCCESS
9244 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9245
9246 if (rcStrict == VINF_SUCCESS)
9247 {
9248 if (iGReg == X86_GREG_xSP)
9249 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
9250 | HM_CHANGED_GUEST_RSP);
9251 else
9252 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9253 }
9254 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9255 {
9256 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9257 rcStrict = VINF_SUCCESS;
9258 }
9259
9260 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
9261 }
9262
9263 return rcStrict;
9264}
9265
9266
9267/**
9268 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9269 * Conditional VM-exit.
9270 */
9271HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9272{
9273 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9274
9275#ifndef IN_NEM_DARWIN
9276 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9277
9278 vmxHCReadToTransient< HMVMX_READ_EXIT_INSTR_LEN
9279 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9280 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9281 | HMVMX_READ_IDT_VECTORING_INFO
9282 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9283 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9284
9285 /*
9286 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9287 */
9288 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9289 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9290 {
9291 /*
9292 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
9293 * instruction emulation to inject the original event. Otherwise, injecting the original event
9294 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
9295 */
9296 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9297 { /* likely */ }
9298 else
9299 {
9300 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
9301# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9302 /** @todo NSTVMX: Think about how this should be handled. */
9303 if (pVmxTransient->fIsNestedGuest)
9304 return VERR_VMX_IPE_3;
9305# endif
9306 return VINF_EM_RAW_INJECT_TRPM_EVENT;
9307 }
9308 }
9309 else
9310 {
9311 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9312 return rcStrict;
9313 }
9314
9315 /*
9316 * Get sufficient state and update the exit history entry.
9317 */
9318 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9319 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9320 AssertRCReturn(rc, rc);
9321
9322 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9323 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
9324 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
9325 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
9326 if (!pExitRec)
9327 {
9328 /*
9329 * If we succeed, resume guest execution.
9330 * If we fail to interpret the instruction because we couldn't get the guest physical address
9331 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
9332 * in the host TLB), we resume execution, which will cause a guest page fault and let the guest handle
9333 * this weird case. See @bugref{6043}.
9334 */
9335 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9336/** @todo bird: We can probably just go straight to IOM here and assume that
9337 * it's MMIO, then fall back on PGM if that hunch didn't work out so
9338 * well. However, we need to address that aliasing workarounds that
9339 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
9340 *
9341 * Might also be interesting to see if we can get this done more or
9342 * less locklessly inside IOM. Need to consider the lookup table
9343 * updating and use a bit more carefully first (or do all updates via
9344 * rendezvous) */
9345 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, &pVCpu->cpum.GstCtx, GCPhys, UINT32_MAX);
9346 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
9347 if ( rcStrict == VINF_SUCCESS
9348 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9349 || rcStrict == VERR_PAGE_NOT_PRESENT)
9350 {
9351 /* Successfully handled MMIO operation. */
9352 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9353 | HM_CHANGED_GUEST_APIC_TPR);
9354 rcStrict = VINF_SUCCESS;
9355 }
9356 }
9357 else
9358 {
9359 /*
9360 * Frequent exit or something needing probing. Call EMHistoryExec.
9361 */
9362 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
9363 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
9364
9365 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9366 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9367
9368 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9369 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9370 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9371 }
9372 return rcStrict;
9373#else
9374 AssertFailed();
9375 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
9376#endif
9377}
9378
9379
9380/**
9381 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9382 * VM-exit.
9383 */
9384HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9385{
9386 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9387#ifndef IN_NEM_DARWIN
9388 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9389
9390 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9391 | HMVMX_READ_EXIT_INSTR_LEN
9392 | HMVMX_READ_EXIT_INTERRUPTION_INFO
9393 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9394 | HMVMX_READ_IDT_VECTORING_INFO
9395 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
9396 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9397
9398 /*
9399 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
9400 */
9401 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
9402 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9403 {
9404 /*
9405 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
9406 * we shall resolve the nested #PF and re-inject the original event.
9407 */
9408 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
9409 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
9410 }
9411 else
9412 {
9413 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
9414 return rcStrict;
9415 }
9416
9417 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9418 int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmcsInfo, __FUNCTION__);
9419 AssertRCReturn(rc, rc);
9420
9421 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
9422 uint64_t const uExitQual = pVmxTransient->uExitQual;
9423 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
9424
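    /* Fold the EPT-violation exit qualification into a #PF-style error code for the nested-paging
       handler below: instruction fetches map to ID, write accesses to RW, and any readable/writable/
       executable EPT entry bit means the translation was present (P). */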
9425 RTGCUINT uErrorCode = 0;
9426 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
9427 uErrorCode |= X86_TRAP_PF_ID;
9428 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9429 uErrorCode |= X86_TRAP_PF_RW;
9430 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
9431 uErrorCode |= X86_TRAP_PF_P;
9432
9433 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9434 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%08RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
9435
9436 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9437
9438 /*
9439 * Handle the pagefault trap for the nested shadow table.
9440 */
9441 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
9442 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, pCtx, GCPhys);
9443 TRPMResetTrap(pVCpu);
9444
9445 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
9446 if ( rcStrict == VINF_SUCCESS
9447 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
9448 || rcStrict == VERR_PAGE_NOT_PRESENT)
9449 {
9450 /* Successfully synced our nested page tables. */
9451 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
9452 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
9453 return VINF_SUCCESS;
9454 }
9455 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9456 return rcStrict;
9457
9458#else /* IN_NEM_DARWIN */
9459 PVM pVM = pVCpu->CTX_SUFF(pVM);
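    /* Sample the host TSC up front so the exit-history records added below get an accurate timestamp. */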
9460 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
9461 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9462 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
9463 vmxHCImportGuestRip(pVCpu);
9464 vmxHCImportGuestSegReg<X86_SREG_CS>(pVCpu);
9465
9466 /*
9467 * Ask PGM for information about the given GCPhys. We need to check if we're
9468 * out of sync first.
9469 */
9470 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE),
9471 false,
9472 false };
9473 PGMPHYSNEMPAGEINFO Info;
9474 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
9475 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
9476 if (RT_SUCCESS(rc))
9477 {
9478 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9479 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
9480 {
9481 if (State.fCanResume)
9482 {
9483 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
9484 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9485 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9486 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9487 State.fDidSomething ? "" : " no-change"));
9488 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
9489 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9490 return VINF_SUCCESS;
9491 }
9492 }
9493
9494 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
9495 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9496 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
9497 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
9498 State.fDidSomething ? "" : " no-change"));
9499 }
9500 else
9501 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
9502 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9503 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
9504
9505 /*
9506 * Emulate the memory access, either access handler or special memory.
9507 */
9508 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
9509 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
9510 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
9511 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
9512 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
9513
9514 rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9515 AssertRCReturn(rc, rc);
9516
9517 VBOXSTRICTRC rcStrict;
9518 if (!pExitRec)
9519 rcStrict = IEMExecOne(pVCpu);
9520 else
9521 {
9522 /* Frequent access or probing. */
9523 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9524 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9525 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9526 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9527 }
9528
9529 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9530
9531 Log4Func(("EPT return rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9532 return rcStrict;
9533#endif /* IN_NEM_DARWIN */
9534}
9535
9536#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9537
9538/**
9539 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
9540 */
9541HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9542{
9543 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9544
9545 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9546 | HMVMX_READ_EXIT_INSTR_INFO
9547 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9548 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9549 | CPUMCTX_EXTRN_SREG_MASK
9550 | CPUMCTX_EXTRN_HWVIRT
9551 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9552 AssertRCReturn(rc, rc);
9553
9554 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9555
9556 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9557 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9558
9559 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
9560 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9561 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9562 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9563 {
9564 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9565 rcStrict = VINF_SUCCESS;
9566 }
9567 return rcStrict;
9568}
9569
9570
9571/**
9572 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
9573 */
9574HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9575{
9576 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9577
9578 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
9579 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9580 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9581 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9582 AssertRCReturn(rc, rc);
9583
9584 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9585
9586 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9587 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
9588 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9589 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9590 {
9591 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9592 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9593 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9594 }
9595 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9596 return rcStrict;
9597}
9598
9599
9600/**
9601 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
9602 */
9603HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9604{
9605 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9606
9607 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9608 | HMVMX_READ_EXIT_INSTR_INFO
9609 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9610 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9611 | CPUMCTX_EXTRN_SREG_MASK
9612 | CPUMCTX_EXTRN_HWVIRT
9613 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9614 AssertRCReturn(rc, rc);
9615
9616 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9617
9618 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9619 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9620
9621 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9622 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9623 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9624 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9625 {
9626 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9627 rcStrict = VINF_SUCCESS;
9628 }
9629 return rcStrict;
9630}
9631
9632
9633/**
9634 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9635 */
9636HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9637{
9638 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9639
9640 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9641 | HMVMX_READ_EXIT_INSTR_INFO
9642 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9643 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9644 | CPUMCTX_EXTRN_SREG_MASK
9645 | CPUMCTX_EXTRN_HWVIRT
9646 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9647 AssertRCReturn(rc, rc);
9648
9649 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9650
9651 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9652 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9653
9654 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9655 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9656 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9657 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9658 {
9659 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9660 rcStrict = VINF_SUCCESS;
9661 }
9662 return rcStrict;
9663}
9664
9665
9666/**
9667 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9668 */
9669HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9670{
9671 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9672
9673 /*
9674 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9675 * thus might not need to import the shadow VMCS state, but it's safer to do so just
9676 * in case code elsewhere dares to look at unsynced VMCS fields.
9677 */
9678 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9679 | HMVMX_READ_EXIT_INSTR_INFO
9680 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9681 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9682 | CPUMCTX_EXTRN_SREG_MASK
9683 | CPUMCTX_EXTRN_HWVIRT
9684 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9685 AssertRCReturn(rc, rc);
9686
9687 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9688
9689 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
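    /* Only decode the memory operand when VMREAD stores to memory; register destinations need no
       effective-address calculation. */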
9690 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9691 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9692
9693 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9694 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9695 {
9696 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9697
9698# if 0 //ndef IN_NEM_DARWIN /** @todo this needs serious tuning still, slows down things enormously. */
9699 /* Try for exit optimization. This is on the following instruction
9700 because it would be a waste of time to have to reinterpret the
9701 already decoded vmread instruction. */
9702 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndType(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_VMREAD));
9703 if (pExitRec)
9704 {
9705 /* Frequent access or probing. */
9706 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9707 AssertRCReturn(rc, rc);
9708
9709 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
9710 Log4(("vmread/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
9711 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9712 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
9713 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9714 }
9715# endif
9716 }
9717 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9718 {
9719 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9720 rcStrict = VINF_SUCCESS;
9721 }
9722 return rcStrict;
9723}
9724
9725
9726/**
9727 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9728 */
9729HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9730{
9731 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9732
9733 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9734 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9735 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9736 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9737 AssertRCReturn(rc, rc);
9738
9739 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9740
9741 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9742 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9743 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9744 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9745 {
9746 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9747 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9748 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9749 }
9750 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9751 return rcStrict;
9752}
9753
9754
9755/**
9756 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9757 */
9758HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9759{
9760 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9761
9762 /*
9763 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since our HM hook
9764 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and it
9765 * flags re-loading the entire shadow VMCS, we should save the entire shadow VMCS here.
9766 */
9767 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9768 | HMVMX_READ_EXIT_INSTR_INFO
9769 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9770 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9771 | CPUMCTX_EXTRN_SREG_MASK
9772 | CPUMCTX_EXTRN_HWVIRT
9773 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9774 AssertRCReturn(rc, rc);
9775
9776 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9777
9778 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
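    /* Only decode the memory operand when the VMWRITE source is in memory; register sources need no
       effective-address calculation. */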
9779 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9780 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9781
9782 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9783 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9784 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9785 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9786 {
9787 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9788 rcStrict = VINF_SUCCESS;
9789 }
9790 return rcStrict;
9791}
9792
9793
9794/**
9795 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9796 */
9797HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9798{
9799 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9800
9801 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9802 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_CR4
9803 | CPUMCTX_EXTRN_HWVIRT
9804 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9805 AssertRCReturn(rc, rc);
9806
9807 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9808
9809 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9810 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9811 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9812 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9813 {
9814 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9815 rcStrict = VINF_SUCCESS;
9816 }
9817 return rcStrict;
9818}
9819
9820
9821/**
9822 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9823 */
9824HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9825{
9826 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9827
9828 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9829 | HMVMX_READ_EXIT_INSTR_INFO
9830 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9831 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9832 | CPUMCTX_EXTRN_SREG_MASK
9833 | CPUMCTX_EXTRN_HWVIRT
9834 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9835 AssertRCReturn(rc, rc);
9836
9837 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9838
9839 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9840 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9841
9842 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9843 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9845 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9846 {
9847 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9848 rcStrict = VINF_SUCCESS;
9849 }
9850 return rcStrict;
9851}
9852
9853
9854/**
9855 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9856 */
9857HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9858{
9859 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9860
9861 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9862 | HMVMX_READ_EXIT_INSTR_INFO
9863 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9864 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9865 | CPUMCTX_EXTRN_SREG_MASK
9866 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9867 AssertRCReturn(rc, rc);
9868
9869 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9870
9871 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9872 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9873
9874 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9875 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9876 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9877 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9878 {
9879 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9880 rcStrict = VINF_SUCCESS;
9881 }
9882 return rcStrict;
9883}
9884
9885
9886# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9887/**
9888 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9889 */
9890HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9891{
9892 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9893
9894 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
9895 | HMVMX_READ_EXIT_INSTR_INFO
9896 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
9897 int rc = vmxHCImportGuestState< CPUMCTX_EXTRN_RSP
9898 | CPUMCTX_EXTRN_SREG_MASK
9899 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
9900 AssertRCReturn(rc, rc);
9901
9902 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9903
9904 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
9905 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9906
9907 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9908 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9909 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9910 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9911 {
9912 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9913 rcStrict = VINF_SUCCESS;
9914 }
9915 return rcStrict;
9916}
9917# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9918#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9919/** @} */
9920
9921
9922#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9923/** @name Nested-guest VM-exit handlers.
9924 * @{
9925 */
9926/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9927/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9928/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9929
9930/**
9931 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9932 * Conditional VM-exit.
9933 */
9934HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9935{
9936 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9937
9938 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
9939
9940 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9941 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9942 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9943
9944 switch (uExitIntType)
9945 {
9946# ifndef IN_NEM_DARWIN
9947 /*
9948 * Physical NMIs:
9949              *   We shouldn't direct host physical NMIs to the nested-guest.  Dispatch them to the host.
9950 */
9951 case VMX_EXIT_INT_INFO_TYPE_NMI:
9952 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9953# endif
9954
9955 /*
9956 * Hardware exceptions,
9957 * Software exceptions,
9958 * Privileged software exceptions:
9959 * Figure out if the exception must be delivered to the guest or the nested-guest.
9960 */
9961 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9962 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9963 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9964 {
9965 vmxHCReadToTransient< HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
9966 | HMVMX_READ_EXIT_INSTR_LEN
9967 | HMVMX_READ_IDT_VECTORING_INFO
9968 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
9969
9970 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9971 if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
9972 {
9973 /* Exit qualification is required for debug and page-fault exceptions. */
9974 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
9975
9976 /*
9977 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9978 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9979 * length. However, if delivery of a software interrupt, software exception or privileged
9980 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9981 */
9982 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
9983 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT(pVmxTransient->uExitIntInfo,
9984 pVmxTransient->uExitIntErrorCode,
9985 pVmxTransient->uIdtVectoringInfo,
9986 pVmxTransient->uIdtVectoringErrorCode);
9987#ifdef DEBUG_ramshankar
9988 vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9989 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n",
9990 pVmxTransient->uExitIntInfo, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9991 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9992 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n",
9993 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9994#endif
9995 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9996 }
9997
9998 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9999 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10000 return vmxHCExitXcpt(pVCpu, pVmxTransient);
10001 }
10002
10003 /*
10004 * Software interrupts:
10005 * VM-exits cannot be caused by software interrupts.
10006 *
10007 * External interrupts:
10008 * This should only happen when "acknowledge external interrupts on VM-exit"
10009 * control is set. However, we never set this when executing a guest or
10010 * nested-guest. For nested-guests it is emulated while injecting interrupts into
10011 * the guest.
10012 */
10013 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10014 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
10015 default:
10016 {
10017 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
10018 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10019 }
10020 }
10021}
10022
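/*
 * The interception decision above ultimately follows the VMX exception-bitmap rule
 * (bit N of the bitmap intercepts vector N), refined for #PF by the page-fault
 * error-code mask/match fields.  The sketch below is a minimal standalone model of
 * that rule for illustration only; the parameter names are made up here and the
 * real check, including all the VMCS plumbing, is CPUMIsGuestVmxXcptInterceptSet().
 */
#if 0 /* Illustrative sketch, not built. */
static bool sketchIsXcptIntercepted(uint32_t fXcptBitmap, uint32_t uPfecMask, uint32_t uPfecMatch,
                                    uint8_t uVector, uint32_t uErrCode)
{
    bool const fBitmapHit = RT_BOOL(fXcptBitmap & RT_BIT_32(uVector));
    if (uVector != X86_XCPT_PF)
        return fBitmapHit;
    /* For #PF the error-code mask/match either confirms or inverts the bitmap bit. */
    bool const fPfecMatch = (uErrCode & uPfecMask) == uPfecMatch;
    return fBitmapHit ? fPfecMatch : !fPfecMatch;
}
#endif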
10023
10024/**
10025 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
10026 * Unconditional VM-exit.
10027 */
10028HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10029{
10030 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10031 return IEMExecVmxVmexitTripleFault(pVCpu);
10032}
10033
10034
10035/**
10036 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10037 */
10038HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10039{
10040 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10041
10042 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
10043 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10044 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
10045}
10046
10047
10048/**
10049 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10050 */
10051HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10052{
10053 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10054
10055 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
10056 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
10057     return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
10058}
10059
10060
10061/**
10062 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
10063 * Unconditional VM-exit.
10064 */
10065HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10066{
10067 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10068
10069 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10070 | HMVMX_READ_EXIT_INSTR_LEN
10071 | HMVMX_READ_IDT_VECTORING_INFO
10072 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10073
10074 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10075 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10076 pVmxTransient->uIdtVectoringErrorCode);
10077 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
10078}
10079
10080
10081/**
10082 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10083 */
10084HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10085{
10086 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10087
10088 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
10089 {
10090 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10091 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10092 }
10093 return vmxHCExitHlt(pVCpu, pVmxTransient);
10094}
10095
10096
10097/**
10098 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10099 */
10100HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10101{
10102 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10103
10104 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10105 {
10106 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10107 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10108 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10109 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10110 }
10111 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
10112}
10113
10114
10115/**
10116 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10117 */
10118HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10119{
10120 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10121
10122 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
10123 {
10124 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10125 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10126 }
10127 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
10128}
10129
10130
10131/**
10132 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
10133 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
10134 */
10135HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10136{
10137 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10138
10139 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
10140 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
10141
10142 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10143
10144 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
10145 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10146 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10147
10148 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
10149 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10150 u64VmcsField &= UINT64_C(0xffffffff);
10151
10152 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
10153 {
10154 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10155 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10156 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10157 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10158 }
10159
10160 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
10161 return vmxHCExitVmread(pVCpu, pVmxTransient);
10162 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
10163}
10164
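/*
 * For reference, the architectural rule behind CPUMIsGuestVmxVmreadVmwriteInterceptSet()
 * is roughly as follows (Intel SDM, VMREAD/VMWRITE bitmaps): without VMCS shadowing every
 * VMREAD/VMWRITE causes a VM-exit; with it, the low 15 bits of the field encoding index a
 * bit in the corresponding 4K bitmap.  This is a simplified sketch with made-up names, not
 * the code actually used above.
 */
#if 0 /* Illustrative sketch, not built. */
static bool sketchIsVmreadVmwriteIntercepted(bool fVmcsShadowing,
                                             uint8_t const *pabBitmap /* 4K VMREAD or VMWRITE bitmap */,
                                             uint64_t u64FieldEnc)
{
    if (!fVmcsShadowing)
        return true;                    /* Without VMCS shadowing every VMREAD/VMWRITE causes a VM-exit. */
    if (u64FieldEnc >> 15)
        return true;                    /* Field encodings with any of bits 63:15 set always cause a VM-exit. */
    uint32_t const idxBit = (uint32_t)u64FieldEnc;
    return RT_BOOL(pabBitmap[idxBit / 8] & (1 << (idxBit % 8)));
}
#endif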
10165
10166/**
10167 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10168 */
10169HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10170{
10171 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10172
10173 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10174 {
10175 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10176 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10177 }
10178
10179 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
10180}
10181
10182
10183/**
10184 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
10185 * Conditional VM-exit.
10186 */
10187HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10188{
10189 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10190
10191 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10192 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10193
10194 VBOXSTRICTRC rcStrict;
10195 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
10196 switch (uAccessType)
10197 {
10198 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
10199 {
10200 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10201 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10202 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10203 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
10204
10205 bool fIntercept;
10206 switch (iCrReg)
10207 {
10208 case 0:
10209 case 4:
10210 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
10211 break;
10212
10213 case 3:
10214 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
10215 break;
10216
10217 case 8:
10218 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
10219 break;
10220
10221 default:
10222 fIntercept = false;
10223 break;
10224 }
10225 if (fIntercept)
10226 {
10227 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10228 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10229 }
10230 else
10231 {
10232 int const rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
10233 AssertRCReturn(rc, rc);
10234 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10235 }
10236 break;
10237 }
10238
10239 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
10240 {
10241 /*
10242 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
10243 * CR2 reads do not cause a VM-exit.
10244 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
10245 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
10246 */
10247 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10248 if ( iCrReg == 3
10249 || iCrReg == 8)
10250 {
10251 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
10252 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
10253 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
10254 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
10255 {
10256 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10257 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10258 }
10259 else
10260 {
10261 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
10262 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
10263 }
10264 }
10265 else
10266 {
10267 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
10268 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
10269 }
10270 break;
10271 }
10272
10273 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
10274 {
10275 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10276 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
10277 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
10278 if ( (uGstHostMask & X86_CR0_TS)
10279 && (uReadShadow & X86_CR0_TS))
10280 {
10281 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10282 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10283 }
10284 else
10285 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
10286 break;
10287 }
10288
10289 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10290 {
10291 RTGCPTR GCPtrEffDst;
10292 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
10293 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
10294 if (fMemOperand)
10295 {
10296 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10297 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
10298 }
10299 else
10300 GCPtrEffDst = NIL_RTGCPTR;
10301
10302 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
10303 {
10304 VMXVEXITINFO ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10305 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
10306 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10307 }
10308 else
10309 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
10310 break;
10311 }
10312
10313 default:
10314 {
10315 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
10316 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
10317 }
10318 }
10319
10320 if (rcStrict == VINF_IEM_RAISED_XCPT)
10321 {
10322 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10323 rcStrict = VINF_SUCCESS;
10324 }
10325 return rcStrict;
10326}
10327
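/*
 * The MOV-to-CR0/CR4 and CLTS decisions above both derive from the CR guest/host mask and
 * read-shadow mechanism: a write exits when it would give a host-owned bit a value different
 * from the read shadow, and CLTS exits when CR0.TS is host-owned and set in the read shadow.
 * The following is a simplified standalone sketch of the write rule only; the real check,
 * CPUMIsGuestVmxMovToCr0Cr4InterceptSet(), may apply further constraints.
 */
#if 0 /* Illustrative sketch, not built. */
static bool sketchIsMovToCr0Cr4Intercepted(uint64_t fGstHostMask, uint64_t uReadShadow, uint64_t uNewCrX)
{
    /* A VM-exit occurs when any host-owned bit of the new value differs from the read shadow. */
    return ((uNewCrX ^ uReadShadow) & fGstHostMask) != 0;
}
#endif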
10328
10329/**
10330 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
10331 * Conditional VM-exit.
10332 */
10333HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10334{
10335 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10336
10337 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
10338 {
10339 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10340 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10341 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10342 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10343 }
10344 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
10345}
10346
10347
10348/**
10349 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
10350 * Conditional VM-exit.
10351 */
10352HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10353{
10354 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10355
10356 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10357
10358 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
10359 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
10360 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
10361
10362 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
10363 uint8_t const cbAccess = s_aIOSizes[uIOSize];
10364 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
10365 {
10366 /*
10367 * IN/OUT instruction:
10368 * - Provides VM-exit instruction length.
10369 *
10370 * INS/OUTS instruction:
10371 * - Provides VM-exit instruction length.
10372 * - Provides Guest-linear address.
10373 * - Optionally provides VM-exit instruction info (depends on CPU feature).
10374 */
10375 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10376 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10377
10378         /* Make sure we don't use stale/uninitialized VMX-transient info below. */
10379 pVmxTransient->ExitInstrInfo.u = 0;
10380 pVmxTransient->uGuestLinearAddr = 0;
10381
10382 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
10383 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
10384 if (fIOString)
10385 {
10386 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10387 if (fVmxInsOutsInfo)
10388 {
10389 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
10390 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10391 }
10392 }
10393
10394 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_AND_LIN_ADDR_FROM_TRANSIENT(pVmxTransient);
10395 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10396 }
10397 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
10398}
10399
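/*
 * The port-intercept test used above follows the VMX I/O-bitmap rule: bitmap A covers ports
 * 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, one bit per port, and an access exits if any
 * byte it touches has its bit set (an access wrapping past port 0xffff always exits).  Below
 * is a minimal standalone sketch with made-up names; the real check is
 * CPUMIsGuestVmxIoInterceptSet().
 */
#if 0 /* Illustrative sketch, not built. */
static bool sketchIsIoIntercepted(bool fUseIoBitmaps, bool fUncondIoExit,
                                  uint8_t const *pabIoBitmapA /* 4K, ports 0x0000-0x7fff */,
                                  uint8_t const *pabIoBitmapB /* 4K, ports 0x8000-0xffff */,
                                  uint16_t uPort, uint8_t cbAccess)
{
    if (!fUseIoBitmaps)
        return fUncondIoExit;           /* Without I/O bitmaps the unconditional I/O exiting control decides. */
    for (uint32_t off = 0; off < cbAccess; off++)
    {
        uint32_t const uThisPort = (uint32_t)uPort + off;
        if (uThisPort > 0xffff)
            return true;                /* Accesses wrapping around port 0xffff always cause a VM-exit. */
        uint8_t const *pabBitmap = uThisPort < 0x8000 ? pabIoBitmapA : pabIoBitmapB;
        uint32_t const idxBit    = uThisPort & 0x7fff;
        if (pabBitmap[idxBit / 8] & (1 << (idxBit % 8)))
            return true;
    }
    return false;
}
#endif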
10400
10401/**
10402 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10403 */
10404HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10405{
10406 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10407
10408 uint32_t fMsrpm;
10409 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10410 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10411 else
10412 fMsrpm = VMXMSRPM_EXIT_RD;
10413
10414 if (fMsrpm & VMXMSRPM_EXIT_RD)
10415 {
10416 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10417 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10418 }
10419 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
10420}
10421
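/*
 * The layout CPUMGetVmxMsrPermission() works against is the architectural 4K MSR bitmap:
 * read bits for MSRs 0x00000000-0x00001fff at offset 0, read bits for 0xc0000000-0xc0001fff
 * at offset 1024, and the matching write bitmaps at offsets 2048 and 3072; MSRs outside both
 * ranges always exit.  A minimal standalone sketch of that lookup (made-up names, not the
 * helper actually used above):
 */
#if 0 /* Illustrative sketch, not built. */
static void sketchQueryMsrBitmap(uint8_t const *pabMsrBitmap /* 4K page */, uint32_t idMsr,
                                 bool *pfInterceptRead, bool *pfInterceptWrite)
{
    uint32_t offRead, idxBit;
    if (idMsr <= UINT32_C(0x00001fff))
    {
        offRead = 0;                                    /* Low-MSR read bitmap. */
        idxBit  = idMsr;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offRead = 1024;                                 /* High-MSR read bitmap. */
        idxBit  = idMsr - UINT32_C(0xc0000000);
    }
    else
    {
        *pfInterceptRead  = true;                       /* MSRs outside both ranges always cause VM-exits. */
        *pfInterceptWrite = true;
        return;
    }
    uint32_t const offWrite = offRead + 2048;           /* Write bitmaps follow the read bitmaps by 2K. */
    *pfInterceptRead  = RT_BOOL(pabMsrBitmap[offRead  + idxBit / 8] & (1 << (idxBit % 8)));
    *pfInterceptWrite = RT_BOOL(pabMsrBitmap[offWrite + idxBit / 8] & (1 << (idxBit % 8)));
}
#endif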
10422
10423/**
10424 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10425 */
10426HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10427{
10428 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10429
10430 uint32_t fMsrpm;
10431 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
10432 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
10433 else
10434 fMsrpm = VMXMSRPM_EXIT_WR;
10435
10436 if (fMsrpm & VMXMSRPM_EXIT_WR)
10437 {
10438 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10439 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10440 }
10441 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
10442}
10443
10444
10445/**
10446 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10447 */
10448HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10449{
10450 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10451
10452 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
10453 {
10454 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10455 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10456 }
10457 return vmxHCExitMwait(pVCpu, pVmxTransient);
10458}
10459
10460
10461/**
10462 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
10463 * VM-exit.
10464 */
10465HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10466{
10467 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10468
10469 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
10470 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10471 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10472 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10473}
10474
10475
10476/**
10477 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10478 */
10479HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10480{
10481 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10482
10483 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
10484 {
10485 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10486 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10487 }
10488 return vmxHCExitMonitor(pVCpu, pVmxTransient);
10489}
10490
10491
10492/**
10493 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10494 */
10495HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10496{
10497 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10498
10499 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
10500 * PAUSE when executing a nested-guest? If it does not, we would not need
10501 * to check for the intercepts here. Just call VM-exit... */
10502
10503 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
10504 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
10505 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10506 {
10507 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10508 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10509 }
10510 return vmxHCExitPause(pVCpu, pVmxTransient);
10511}
10512
10513
10514/**
10515 * Nested-guest VM-exit handler for when the TPR value is lowered below the
10516 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10517 */
10518HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10519{
10520 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10521
10522 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
10523 {
10524 vmxHCReadToTransient<HMVMX_READ_GUEST_PENDING_DBG_XCPTS>(pVCpu, pVmxTransient);
10525 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_DBG_XCPTS_FROM_TRANSIENT(pVmxTransient);
10526 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
10527 }
10528 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
10529}
10530
10531
10532/**
10533 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
10534 * VM-exit.
10535 */
10536HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10537{
10538 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10539
10540 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10541 | HMVMX_READ_EXIT_INSTR_LEN
10542 | HMVMX_READ_IDT_VECTORING_INFO
10543 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10544
10545 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10546
10547 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10548 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10549
10550 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_FROM_TRANSIENT(pVmxTransient);
10551 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10552 pVmxTransient->uIdtVectoringErrorCode);
10553 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10554}
10555
10556
10557/**
10558 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10559 * Conditional VM-exit.
10560 */
10561HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10562{
10563 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10564
10565 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10566 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10567 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10568}
10569
10570
10571/**
10572 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10573 * Conditional VM-exit.
10574 */
10575HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10576{
10577 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10578
10579 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10580 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
10581 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10582}
10583
10584
10585/**
10586 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10587 */
10588HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10589{
10590 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10591
10592 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10593 {
10594 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10595 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10596 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10597 }
10598 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10599}
10600
10601
10602/**
10603 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10604 */
10605HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10606{
10607 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10608
10609 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10610 {
10611 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10612 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10613 }
10614 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10615}
10616
10617
10618/**
10619 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10620 */
10621HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10622{
10623 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10624
10625 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10626 {
10627 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10628 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10629 | HMVMX_READ_EXIT_INSTR_INFO
10630 | HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10631 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10632 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10633 }
10634 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10635}
10636
10637
10638/**
10639 * Nested-guest VM-exit handler for invalid-guest state
10640 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10641 */
10642HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10643{
10644 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10645
10646 /*
10647 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10648      * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
10649      * Handle it as if the outer guest itself were in an invalid guest state.
10650 *
10651 * When the fast path is implemented, this should be changed to cause the corresponding
10652 * nested-guest VM-exit.
10653 */
10654 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10655}
10656
10657
10658/**
10659 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10660 * and only provide the instruction length.
10661 *
10662 * Unconditional VM-exit.
10663 */
10664HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10665{
10666 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10667
10668#ifdef VBOX_STRICT
10669 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10670 switch (pVmxTransient->uExitReason)
10671 {
10672 case VMX_EXIT_ENCLS:
10673 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10674 break;
10675
10676 case VMX_EXIT_VMFUNC:
10677 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10678 break;
10679 }
10680#endif
10681
10682 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_LEN>(pVCpu, pVmxTransient);
10683 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10684}
10685
10686
10687/**
10688 * Nested-guest VM-exit handler for instructions that provide instruction length as
10689 * well as more information.
10690 *
10691 * Unconditional VM-exit.
10692 */
10693HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10694{
10695 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10696
10697# ifdef VBOX_STRICT
10698 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10699 switch (pVmxTransient->uExitReason)
10700 {
10701 case VMX_EXIT_GDTR_IDTR_ACCESS:
10702 case VMX_EXIT_LDTR_TR_ACCESS:
10703 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10704 break;
10705
10706 case VMX_EXIT_RDRAND:
10707 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10708 break;
10709
10710 case VMX_EXIT_RDSEED:
10711 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10712 break;
10713
10714 case VMX_EXIT_XSAVES:
10715 case VMX_EXIT_XRSTORS:
10716 /** @todo NSTVMX: Verify XSS-bitmap. */
10717 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10718 break;
10719
10720 case VMX_EXIT_UMWAIT:
10721 case VMX_EXIT_TPAUSE:
10722 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10723 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10724 break;
10725
10726 case VMX_EXIT_LOADIWKEY:
10727 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10728 break;
10729 }
10730# endif
10731
10732 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10733 | HMVMX_READ_EXIT_INSTR_LEN
10734 | HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
10735 VMXVEXITINFO const ExitInfo = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_INFO_FROM_TRANSIENT(pVmxTransient);
10736 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10737}
10738
10739# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10740
10741/**
10742 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10743 * Conditional VM-exit.
10744 */
10745HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10746{
10747 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10748 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10749
10750 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10751 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10752 {
10753 vmxHCReadToTransient< HMVMX_READ_EXIT_QUALIFICATION
10754 | HMVMX_READ_EXIT_INSTR_LEN
10755 | HMVMX_READ_EXIT_INTERRUPTION_INFO
10756 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
10757 | HMVMX_READ_IDT_VECTORING_INFO
10758 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
10759 | HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10760 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10761 AssertRCReturn(rc, rc);
10762
10763 /*
10764          * If this VM-exit is ours to handle, we are responsible for re-injecting any event whose
10765          * delivery might have triggered it.  If we forward the problem to the inner VMM instead,
10766          * the event becomes the inner VMM's problem and we clear the recovered event (see fClearEventOnForward).
10767 */
10768 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
10769 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10770 { /*likely*/ }
10771 else
10772 {
10773 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
10774 return rcStrict;
10775 }
10776 uint32_t const fClearEventOnForward = VCPU_2_VMXSTATE(pVCpu).Event.fPending; /* paranoia. should not inject events below. */
10777
10778 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10779 uint64_t const uExitQual = pVmxTransient->uExitQual;
10780
10781 RTGCPTR GCPtrNestedFault;
10782 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10783 if (fIsLinearAddrValid)
10784 {
10785 vmxHCReadToTransient<HMVMX_READ_GUEST_LINEAR_ADDR>(pVCpu, pVmxTransient);
10786 GCPtrNestedFault = pVmxTransient->uGuestLinearAddr;
10787 }
10788 else
10789 GCPtrNestedFault = 0;
10790
10791 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10792 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10793 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10794 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10795 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10796
10797 PGMPTWALK Walk;
10798 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10799 rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, pCtx, GCPhysNestedFault,
10800 fIsLinearAddrValid, GCPtrNestedFault, &Walk);
10801 Log7Func(("PGM (uExitQual=%#RX64, %RGp, %RGv) -> %Rrc (fFailed=%d)\n",
10802 uExitQual, GCPhysNestedFault, GCPtrNestedFault, VBOXSTRICTRC_VAL(rcStrict), Walk.fFailed));
10803 if (RT_SUCCESS(rcStrict))
10804 return rcStrict;
10805
10806 if (fClearEventOnForward)
10807 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
10808
10809 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10810 pVmxTransient->uIdtVectoringErrorCode);
10811 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10812 {
10813 VMXVEXITINFO const ExitInfo
10814 = VMXVEXITINFO_INIT_WITH_QUAL_AND_INSTR_LEN_AND_GST_ADDRESSES(VMX_EXIT_EPT_VIOLATION,
10815 pVmxTransient->uExitQual,
10816 pVmxTransient->cbExitInstr,
10817 pVmxTransient->uGuestLinearAddr,
10818 pVmxTransient->uGuestPhysicalAddr);
10819 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10820 }
10821
10822 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10823 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10824 }
10825
10826 return vmxHCExitEptViolation(pVCpu, pVmxTransient);
10827}
10828
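/*
 * For reference, the exit-qualification bits the handler above converts into a #PF-style
 * error code are defined by the Intel SDM for EPT violations: bits 0-2 describe the access
 * (read/write/instruction fetch), bits 3-5 the permissions of the EPT entry that was hit,
 * and bit 7 whether the guest-linear address field is valid.  A small decoding sketch,
 * purely illustrative (the production code uses the VMX_EXIT_QUAL_EPT_* macros instead):
 */
#if 0 /* Illustrative sketch, not built. */
typedef struct SKETCHEPTVIOLQUAL
{
    bool fRead, fWrite, fInstrFetch;        /* What kind of access faulted (bits 0-2). */
    bool fReadable, fWritable, fExecutable; /* Permissions of the violated EPT entry (bits 3-5). */
    bool fLinearAddrValid;                  /* Guest-linear address field validity (bit 7). */
} SKETCHEPTVIOLQUAL;

static SKETCHEPTVIOLQUAL sketchDecodeEptViolationQual(uint64_t uExitQual)
{
    SKETCHEPTVIOLQUAL Decoded;
    Decoded.fRead            = RT_BOOL(uExitQual & RT_BIT_64(0));
    Decoded.fWrite           = RT_BOOL(uExitQual & RT_BIT_64(1));
    Decoded.fInstrFetch      = RT_BOOL(uExitQual & RT_BIT_64(2));
    Decoded.fReadable        = RT_BOOL(uExitQual & RT_BIT_64(3));
    Decoded.fWritable        = RT_BOOL(uExitQual & RT_BIT_64(4));
    Decoded.fExecutable      = RT_BOOL(uExitQual & RT_BIT_64(5));
    Decoded.fLinearAddrValid = RT_BOOL(uExitQual & RT_BIT_64(7));
    return Decoded;
}
#endif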
10829
10830/**
10831 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10832 * Conditional VM-exit.
10833 */
10834HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10835{
10836 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10837 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10838
10839 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10840 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_EPT))
10841 {
10842 vmxHCReadToTransient<HMVMX_READ_GUEST_PHYSICAL_ADDR>(pVCpu, pVmxTransient);
10843 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_ALL>(pVCpu, pVmcsInfo, __FUNCTION__);
10844 AssertRCReturn(rc, rc);
10845
10846 PGMPTWALK Walk;
10847 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10848 RTGCPHYS const GCPhysNestedFault = pVmxTransient->uGuestPhysicalAddr;
10849 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, pCtx,
10850 GCPhysNestedFault, false /* fIsLinearAddrValid */,
10851 0 /* GCPtrNestedFault */, &Walk);
10852 if (RT_SUCCESS(rcStrict))
10853 {
10854 AssertMsgFailed(("Shouldn't happen with the way we have programmed the EPT shadow tables\n"));
10855 return rcStrict;
10856 }
10857
10858 AssertMsg(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG, ("GCPhysNestedFault=%#RGp\n", GCPhysNestedFault));
10859 vmxHCReadToTransient< HMVMX_READ_IDT_VECTORING_INFO
10860 | HMVMX_READ_IDT_VECTORING_ERROR_CODE>(pVCpu, pVmxTransient);
10861
10862 VMXVEXITEVENTINFO const ExitEventInfo = VMXVEXITEVENTINFO_INIT_ONLY_IDT(pVmxTransient->uIdtVectoringInfo,
10863 pVmxTransient->uIdtVectoringErrorCode);
10864 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10865 }
10866
10867 return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
10868}
10869
10870# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10871
10872/** @} */
10873#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10874
10875
10876/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10877 * probes.
10878 *
10879 * The following few functions and the associated structure contain the bloat
10880 * necessary for providing detailed debug events and dtrace probes as well as
10881 * reliable host-side single stepping.  This works on the principle of
10882 * "subclassing" the normal execution loop and workers. We replace the loop
10883 * method completely and override selected helpers to add necessary adjustments
10884 * to their core operation.
10885 *
10886 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10887 * any performance for debug and analysis features.
10888 *
10889 * @{
10890 */
10891
10892/**
10893 * Transient per-VCPU debug state of the VMCS and related info that we save and
10894 * restore in the debug run loop.
10895 */
10896typedef struct VMXRUNDBGSTATE
10897{
10898 /** The RIP we started executing at. This is for detecting that we stepped. */
10899 uint64_t uRipStart;
10900 /** The CS we started executing with. */
10901 uint16_t uCsStart;
10902
10903 /** Whether we've actually modified the 1st execution control field. */
10904 bool fModifiedProcCtls : 1;
10905 /** Whether we've actually modified the 2nd execution control field. */
10906 bool fModifiedProcCtls2 : 1;
10907 /** Whether we've actually modified the exception bitmap. */
10908 bool fModifiedXcptBitmap : 1;
10909
10910     /** Whether we want the CR0 guest/host mask to be cleared. */
10911 bool fClearCr0Mask : 1;
10912     /** Whether we want the CR4 guest/host mask to be cleared. */
10913 bool fClearCr4Mask : 1;
10914 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10915 uint32_t fCpe1Extra;
10916 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10917 uint32_t fCpe1Unwanted;
10918 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10919 uint32_t fCpe2Extra;
10920 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10921 uint32_t bmXcptExtra;
10922 /** The sequence number of the Dtrace provider settings the state was
10923 * configured against. */
10924 uint32_t uDtraceSettingsSeqNo;
10925 /** VM-exits to check (one bit per VM-exit). */
10926 uint32_t bmExitsToCheck[3];
10927
10928 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10929 uint32_t fProcCtlsInitial;
10930 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10931 uint32_t fProcCtls2Initial;
10932 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10933 uint32_t bmXcptInitial;
10934} VMXRUNDBGSTATE;
10935AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10936typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10937
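/*
 * Rough shape of how the debug run loop is expected to drive this state: initialize it once,
 * refresh and apply it before each VM-entry, and revert the VMCS changes when leaving the
 * loop.  This is only an ordering sketch of the helpers defined in this section; the real
 * debug loop interleaves them with all the normal pre/post-run work and VM-exit handling.
 */
#if 0 /* Illustrative sketch, not built. */
static VBOXSTRICTRC sketchDebugLoopShape(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    VMXRUNDBGSTATE DbgState;
    vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (;;)
    {
        /* Recompute which VM-exits and exceptions DBGF/DTrace currently want... */
        vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
        /* ...and push the resulting control/bitmap deltas into the VMCS just before entry. */
        vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);

        /* <execute the guest, handle the VM-exit, check for single-step completion> */
        break;
    }

    /* Restore the VMCS controls we fiddled with, for the benefit of the normal loop. */
    return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
}
#endif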
10938
10939/**
10940 * Initializes the VMXRUNDBGSTATE structure.
10941 *
10942 * @param pVCpu The cross context virtual CPU structure of the
10943 * calling EMT.
10944 * @param pVmxTransient The VMX-transient structure.
10945 * @param pDbgState The debug state to initialize.
10946 */
10947static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10948{
10949 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10950 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10951
10952 pDbgState->fModifiedProcCtls = false;
10953 pDbgState->fModifiedProcCtls2 = false;
10954 pDbgState->fModifiedXcptBitmap = false;
10955 pDbgState->fClearCr0Mask = false;
10956 pDbgState->fClearCr4Mask = false;
10957 pDbgState->fCpe1Extra = 0;
10958 pDbgState->fCpe1Unwanted = 0;
10959 pDbgState->fCpe2Extra = 0;
10960 pDbgState->bmXcptExtra = 0;
10961 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10962 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10963 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10964}
10965
10966
10967/**
10968 * Updates the VMCS fields with changes requested by @a pDbgState.
10969 *
10970 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10971 * immediately before executing guest code, i.e. when interrupts are disabled.
10972 * We don't check status codes here as we cannot easily assert or return in the
10973 * latter case.
10974 *
10975 * @param pVCpu The cross context virtual CPU structure.
10976 * @param pVmxTransient The VMX-transient structure.
10977 * @param pDbgState The debug state.
10978 */
10979static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10980{
10981 /*
10982 * Ensure desired flags in VMCS control fields are set.
10983 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10984 *
10985 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10986 * there should be no stale data in pCtx at this point.
10987 */
10988 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10989 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10990 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10991 {
10992 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10993 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10994 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10995 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10996 pDbgState->fModifiedProcCtls = true;
10997 }
10998
10999 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11000 {
11001 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11002 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11003 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11004 pDbgState->fModifiedProcCtls2 = true;
11005 }
11006
11007 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11008 {
11009 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11010 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11011 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11012 pDbgState->fModifiedXcptBitmap = true;
11013 }
11014
11015 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11016 {
11017 pVmcsInfo->u64Cr0Mask = 0;
11018 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
11019 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11020 }
11021
11022 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11023 {
11024 pVmcsInfo->u64Cr4Mask = 0;
11025 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
11026 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11027 }
11028
11029 NOREF(pVCpu);
11030}
11031
11032
11033/**
11034 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
11035 * re-entry next time around.
11036 *
11037 * @returns Strict VBox status code (i.e. informational status codes too).
11038 * @param pVCpu The cross context virtual CPU structure.
11039 * @param pVmxTransient The VMX-transient structure.
11040 * @param pDbgState The debug state.
11041 * @param rcStrict The return code from executing the guest using single
11042 * stepping.
11043 */
11044static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11045 VBOXSTRICTRC rcStrict)
11046{
11047 /*
11048 * Restore VM-exit control settings as we may not reenter this function the
11049 * next time around.
11050 */
11051 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11052
11053     /* We reload the initial value and trigger whatever recalculations we can the
11054        next time around.  From the looks of things, that's all that's required atm. */
11055 if (pDbgState->fModifiedProcCtls)
11056 {
11057 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11058 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11059 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11060 AssertRC(rc2);
11061 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11062 }
11063
11064 /* We're currently the only ones messing with this one, so just restore the
11065 cached value and reload the field. */
11066 if ( pDbgState->fModifiedProcCtls2
11067 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11068 {
11069 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11070 AssertRC(rc2);
11071 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11072 }
11073
11074 /* If we've modified the exception bitmap, we restore it and trigger
11075 reloading and partial recalculation the next time around. */
11076 if (pDbgState->fModifiedXcptBitmap)
11077 {
11078 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pDbgState->bmXcptInitial);
11079 AssertRC(rc2);
11080 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11081 }
11082
11083 return rcStrict;
11084}
11085
11086
11087/**
11088 * Configures VM-exit controls for current DBGF and DTrace settings.
11089 *
11090 * This updates @a pDbgState and the VMCS execution control fields to reflect
11091 * the necessary VM-exits demanded by DBGF and DTrace.
11092 *
11093 * @param pVCpu The cross context virtual CPU structure.
11094 * @param pVmxTransient The VMX-transient structure. May update
11095 * fUpdatedTscOffsettingAndPreemptTimer.
11096 * @param pDbgState The debug state.
11097 */
11098static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11099{
11100#ifndef IN_NEM_DARWIN
11101 /*
11102 * Take down the dtrace serial number so we can spot changes.
11103 */
11104 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11105 ASMCompilerBarrier();
11106#endif
11107
11108 /*
11109 * We'll rebuild most of the middle block of data members (holding the
11110 * current settings) as we go along here, so start by clearing it all.
11111 */
11112 pDbgState->bmXcptExtra = 0;
11113 pDbgState->fCpe1Extra = 0;
11114 pDbgState->fCpe1Unwanted = 0;
11115 pDbgState->fCpe2Extra = 0;
11116 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11117 pDbgState->bmExitsToCheck[i] = 0;
11118
11119 /*
11120 * Software interrupts (INT XXh) - no idea how to trigger these...
11121 */
11122 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11123 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11124 || VBOXVMM_INT_SOFTWARE_ENABLED())
11125 {
11126 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11127 }
11128
11129 /*
11130 * INT3 breakpoints - triggered by #BP exceptions.
11131 */
11132 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11133 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11134
11135 /*
11136 * Exception bitmap and XCPT events+probes.
11137 */
11138 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11139 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11140 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11141
11142 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11143 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11144 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11145 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11146 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11147 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11148 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11149 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11150 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11151 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11152 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11153 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11154 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11155 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11156 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11157 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11158 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11159 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11160
11161 if (pDbgState->bmXcptExtra)
11162 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11163
11164 /*
11165 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11166 *
11167 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11168 * So, when adding/changing/removing please don't forget to update it.
11169 *
11170 * Some of the macros pick up local variables to save horizontal space
11171 * (being able to see it all in a table is the lesser evil here).
11172 */
11173#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11174 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11175 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11176#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11177 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11178 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11179 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11180 } else do { } while (0)
11181#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11182 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11183 { \
11184 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11185 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11186 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11187 } else do { } while (0)
11188#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11189 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11190 { \
11191 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11192 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11193 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11194 } else do { } while (0)
11195#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11196 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11197 { \
11198 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11199 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11200 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11201 } else do { } while (0)
11202
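    /*
     * To make the table below easier to read, here is the INSTR_HALT row hand-expanded.
     * This is not additional code, merely what SET_CPE1_XBM_IF_EITHER_EN produces once
     * IS_EITHER_ENABLED and the RT_CONCAT helpers have been substituted:
     */
#if 0 /* Illustrative expansion, not built. */
    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
        || VBOXVMM_INSTR_HALT_ENABLED())
    {
        pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
        AssertCompile((unsigned)VMX_EXIT_HLT < sizeof(pDbgState->bmExitsToCheck) * 8);
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
    }
    else do { } while (0);
#endif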
11203 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11204 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11205 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11206 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11207 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11208
11209 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11210 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11211 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11212 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11213 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11214 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11215 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11216 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11217 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11218 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11219 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11220 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11221 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11222 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11223 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11224 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11225 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11226 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11227 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11228 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11229 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11230 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11231 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11232 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11233 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11234 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11235 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11236 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11237 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11238 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11239 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11240 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11241 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11242 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11243 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11244 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11245
11246 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11247 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11248 {
11249 int rc = vmxHCImportGuestStateEx(pVCpu, pVmxTransient->pVmcsInfo,
11250 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
11251 AssertRC(rc);
11252
11253#if 0 /** @todo fix me */
11254 pDbgState->fClearCr0Mask = true;
11255 pDbgState->fClearCr4Mask = true;
11256#endif
11257 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11258 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11259 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11260 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11261 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11262 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11263 require clearing here and in the loop if we start using it. */
11264 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11265 }
11266 else
11267 {
11268 if (pDbgState->fClearCr0Mask)
11269 {
11270 pDbgState->fClearCr0Mask = false;
11271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
11272 }
11273 if (pDbgState->fClearCr4Mask)
11274 {
11275 pDbgState->fClearCr4Mask = false;
11276 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
11277 }
11278 }
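/*
 * Editor's note (interpretation, not from the original source): when the CR-access
 * probes are switched off again, the else branch above clears fClearCr0Mask and
 * fClearCr4Mask and marks CR0/CR4 as changed via HM_CHANGED_GUEST_CR0 and
 * HM_CHANGED_GUEST_CR4, so the regular export path re-establishes the normal
 * CR0/CR4 guest/host masks in the VMCS on the next VM-entry.
 */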
11279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11281
11282 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11283 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11284 {
11285 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11286 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11287 }
11288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11289 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11290
11291 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11292 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11293 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11295 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11297 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11298 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11299#if 0 /** @todo too slow, fix handler. */
11300 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11301#endif
11302 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11303
11304 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11305 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11306 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11307 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11308 {
11309 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11310 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11311 }
11312 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11314 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11315 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11316
11317 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11318 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11319 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11320 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11321 {
11322 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11323 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11324 }
11325 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11326 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11327 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11329
11330 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11331 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11332 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11333 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11334 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11335 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11336 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11337 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11338 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11339 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11340 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11341 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11342 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11343 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11344 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11345 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11346 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11347 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11348 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11349 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11350 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11351 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11352
11353#undef IS_EITHER_ENABLED
11354#undef SET_ONLY_XBM_IF_EITHER_EN
11355#undef SET_CPE1_XBM_IF_EITHER_EN
11356#undef SET_CPEU_XBM_IF_EITHER_EN
11357#undef SET_CPE2_XBM_IF_EITHER_EN
11358
11359 /*
11360 * Sanitize the control stuff.
11361 */
11362 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
11363 if (pDbgState->fCpe2Extra)
11364 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11365 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
11366 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
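/*
 * Editor's illustration (assumption about the allowed0/allowed1 semantics, not from
 * the original source): allowed1 appears to hold the control bits the CPU permits to
 * be set and allowed0 those it forces to 1, so the masking above keeps only supported
 * extra controls and never requests clearing a mandatory one.  With hypothetical bit
 * values:
 *
 *     fCpe2Extra    = 0x0000000c;      // want two extra secondary controls
 *     allowed1      = 0x00000004;      // CPU supports only one of them
 *     fCpe2Extra   &= allowed1;        // -> 0x00000004, unsupported bit dropped
 *
 *     fCpe1Unwanted = 0x00200000;      // want one primary control cleared
 *     allowed0      = 0x00200000;      // but the CPU forces it to 1
 *     fCpe1Unwanted &= ~allowed0;      // -> 0, the clearing request is dropped
 */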
11367#ifndef IN_NEM_DARWIN
11368 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11369 {
11370 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
11371 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11372 }
11373#else
11374 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11375 {
11376 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
11377 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11378 }
11379#endif
11380
11381 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11382 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11383 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11384 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11385}
11386
11387
11388/**
11389 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11390 * appropriate.
11391 *
11392 * The caller has checked the VM-exit against the
11393 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11394 * already, so we don't have to do that either.
11395 *
11396 * @returns Strict VBox status code (i.e. informational status codes too).
11397 * @param pVCpu The cross context virtual CPU structure.
11398 * @param pVmxTransient The VMX-transient structure.
11399 * @param uExitReason The VM-exit reason.
11400 *
11401 * @remarks The name of this function is displayed by dtrace, so keep it short
11402 * and to the point. No longer than 33 chars, please.
11403 */
11404static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11405{
11406 /*
11407 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11408 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11409 *
11410 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11411 * does. Must add/change/remove both places. Same ordering, please.
11412 *
11413 * Added/removed events must also be reflected in the next section
11414 * where we dispatch dtrace events.
11415 */
11416 bool fDtrace1 = false;
11417 bool fDtrace2 = false;
11418 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11419 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11420 uint32_t uEventArg = 0;
11421#define SET_EXIT(a_EventSubName) \
11422 do { \
11423 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11424 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11425 } while (0)
11426#define SET_BOTH(a_EventSubName) \
11427 do { \
11428 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11429 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11430 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11431 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11432 } while (0)
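/*
 * Editor's illustration (not from the original source): for a case such as
 * VMX_EXIT_CPUID below, SET_BOTH(CPUID) expands to roughly
 *
 *     enmEvent1 = DBGFEVENT_INSTR_CPUID;
 *     enmEvent2 = DBGFEVENT_EXIT_CPUID;
 *     fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
 *     fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
 *
 * i.e. it selects both the instruction-level and the exit-level DBGF event and
 * samples whether the corresponding dtrace probes are currently enabled.
 */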
11433 switch (uExitReason)
11434 {
11435 case VMX_EXIT_MTF:
11436 return vmxHCExitMtf(pVCpu, pVmxTransient);
11437
11438 case VMX_EXIT_XCPT_OR_NMI:
11439 {
11440 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11441 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11442 {
11443 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11444 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11445 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11446 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11447 {
11448 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11449 {
11450 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE>(pVCpu, pVmxTransient);
11451 uEventArg = pVmxTransient->uExitIntErrorCode;
11452 }
11453 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11454 switch (enmEvent1)
11455 {
11456 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11457 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11458 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11459 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11460 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11461 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11462 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11463 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11464 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11465 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11466 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11467 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11468 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11469 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11470 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11471 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11472 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11473 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11474 default: break;
11475 }
11476 }
11477 else
11478 AssertFailed();
11479 break;
11480
11481 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11482 uEventArg = idxVector;
11483 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11484 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11485 break;
11486 }
11487 break;
11488 }
11489
11490 case VMX_EXIT_TRIPLE_FAULT:
11491 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11492 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11493 break;
11494 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11495 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11496 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11497 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11498 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11499
11500 /* Instruction specific VM-exits: */
11501 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11502 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11503 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11504 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11505 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11506 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11507 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11508 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11509 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11510 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11511 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11512 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11513 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11514 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11515 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11516 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11517 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11518 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11519 case VMX_EXIT_MOV_CRX:
11520 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11521 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11522 SET_BOTH(CRX_READ);
11523 else
11524 SET_BOTH(CRX_WRITE);
11525 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11526 break;
11527 case VMX_EXIT_MOV_DRX:
11528 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11529 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11530 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11531 SET_BOTH(DRX_READ);
11532 else
11533 SET_BOTH(DRX_WRITE);
11534 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11535 break;
11536 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11537 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11538 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11539 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11540 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11541 case VMX_EXIT_GDTR_IDTR_ACCESS:
11542 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11543 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11544 {
11545 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11546 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11547 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11548 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11549 }
11550 break;
11551
11552 case VMX_EXIT_LDTR_TR_ACCESS:
11553 vmxHCReadToTransient<HMVMX_READ_EXIT_INSTR_INFO>(pVCpu, pVmxTransient);
11554 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11555 {
11556 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11557 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11558 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11559 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11560 }
11561 break;
11562
11563 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11564 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11565 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11566 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11567 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11568 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11569 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11570 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11571 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11572 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11573 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11574
11575 /* Events that aren't relevant at this point. */
11576 case VMX_EXIT_EXT_INT:
11577 case VMX_EXIT_INT_WINDOW:
11578 case VMX_EXIT_NMI_WINDOW:
11579 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11580 case VMX_EXIT_PREEMPT_TIMER:
11581 case VMX_EXIT_IO_INSTR:
11582 break;
11583
11584 /* Errors and unexpected events. */
11585 case VMX_EXIT_INIT_SIGNAL:
11586 case VMX_EXIT_SIPI:
11587 case VMX_EXIT_IO_SMI:
11588 case VMX_EXIT_SMI:
11589 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11590 case VMX_EXIT_ERR_MSR_LOAD:
11591 case VMX_EXIT_ERR_MACHINE_CHECK:
11592 case VMX_EXIT_PML_FULL:
11593 case VMX_EXIT_VIRTUALIZED_EOI:
11594 break;
11595
11596 default:
11597 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11598 break;
11599 }
11600#undef SET_BOTH
11601#undef SET_EXIT
11602
11603 /*
11604 * Dtrace tracepoints go first. We do them all here at once so we don't
11605 * have to repeat the guest-state saving and related setup a few dozen times.
11606 * The downside is that we've got to repeat the switch, though this time
11607 * we use enmEvent since the probes are a subset of what DBGF does.
11608 */
11609 if (fDtrace1 || fDtrace2)
11610 {
11611 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11612 vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11613 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11614 switch (enmEvent1)
11615 {
11616 /** @todo consider which extra parameters would be helpful for each probe. */
11617 case DBGFEVENT_END: break;
11618 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11619 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11620 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11621 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11622 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11623 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11624 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11625 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11626 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11627 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11628 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11629 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11630 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11631 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11632 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11633 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11634 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11635 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11636 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11637 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11638 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11639 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11640 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11641 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11642 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11643 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11644 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11645 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11646 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11647 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11648 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11649 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11650 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11651 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11652 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11653 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11654 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11655 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11656 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11657 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11658 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11659 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11660 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11661 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11662 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11663 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11664 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11665 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11666 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11667 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11668 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11669 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11670 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11671 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11672 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11673 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11674 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11675 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11676 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11677 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11678 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11679 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11680 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11681 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11682 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11683 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11684 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11685 }
11686 switch (enmEvent2)
11687 {
11688 /** @todo consider which extra parameters would be helpful for each probe. */
11689 case DBGFEVENT_END: break;
11690 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11691 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11692 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11693 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11694 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11695 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11696 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11697 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11698 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11699 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11700 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11701 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11702 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11703 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11704 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11705 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11706 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11707 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11708 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11709 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11710 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11711 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11712 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11713 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11714 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11715 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11716 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11717 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11718 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11719 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11720 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11721 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11722 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11724 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11725 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11726 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11727 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11728 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11729 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11730 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11731 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11732 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11733 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11734 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11735 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11736 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11737 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11738 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11739 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11740 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11741 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11742 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11743 }
11744 }
11745
11746 /*
11747 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11748 * the DBGF call will do a full check).
11749 *
11750 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11751 * Note! If we have two events, we prioritize the first, i.e. the instruction
11752 * one, in order to avoid event nesting.
11753 */
11754 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11755 if ( enmEvent1 != DBGFEVENT_END
11756 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11757 {
11758 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11759 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11760 if (rcStrict != VINF_SUCCESS)
11761 return rcStrict;
11762 }
11763 else if ( enmEvent2 != DBGFEVENT_END
11764 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11765 {
11766 vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11767 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11768 if (rcStrict != VINF_SUCCESS)
11769 return rcStrict;
11770 }
11771
11772 return VINF_SUCCESS;
11773}
11774
11775
11776/**
11777 * Single-stepping VM-exit filtering.
11778 *
11779 * This preprocesses the VM-exits and decides whether we've gotten far
11780 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11781 * handling is performed.
11782 *
11783 * @returns Strict VBox status code (i.e. informational status codes too).
11784 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11785 * @param pVmxTransient The VMX-transient structure.
11786 * @param pDbgState The debug state.
11787 */
11788DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11789{
11790 /*
11791 * Expensive (saves context) generic dtrace VM-exit probe.
11792 */
11793 uint32_t const uExitReason = pVmxTransient->uExitReason;
11794 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11795 { /* more likely */ }
11796 else
11797 {
11798 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);
11799 int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11800 AssertRC(rc);
11801 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11802 }
11803
11804#ifndef IN_NEM_DARWIN
11805 /*
11806 * Check for host NMI, just to get that out of the way.
11807 */
11808 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11809 { /* normally likely */ }
11810 else
11811 {
11812 vmxHCReadToTransient<HMVMX_READ_EXIT_INTERRUPTION_INFO>(pVCpu, pVmxTransient);
11813 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11814 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11815 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11816 }
11817#endif
11818
11819 /*
11820 * Check for single stepping event if we're stepping.
11821 */
11822 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11823 {
11824 switch (uExitReason)
11825 {
11826 case VMX_EXIT_MTF:
11827 return vmxHCExitMtf(pVCpu, pVmxTransient);
11828
11829 /* Various events: */
11830 case VMX_EXIT_XCPT_OR_NMI:
11831 case VMX_EXIT_EXT_INT:
11832 case VMX_EXIT_TRIPLE_FAULT:
11833 case VMX_EXIT_INT_WINDOW:
11834 case VMX_EXIT_NMI_WINDOW:
11835 case VMX_EXIT_TASK_SWITCH:
11836 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11837 case VMX_EXIT_APIC_ACCESS:
11838 case VMX_EXIT_EPT_VIOLATION:
11839 case VMX_EXIT_EPT_MISCONFIG:
11840 case VMX_EXIT_PREEMPT_TIMER:
11841
11842 /* Instruction specific VM-exits: */
11843 case VMX_EXIT_CPUID:
11844 case VMX_EXIT_GETSEC:
11845 case VMX_EXIT_HLT:
11846 case VMX_EXIT_INVD:
11847 case VMX_EXIT_INVLPG:
11848 case VMX_EXIT_RDPMC:
11849 case VMX_EXIT_RDTSC:
11850 case VMX_EXIT_RSM:
11851 case VMX_EXIT_VMCALL:
11852 case VMX_EXIT_VMCLEAR:
11853 case VMX_EXIT_VMLAUNCH:
11854 case VMX_EXIT_VMPTRLD:
11855 case VMX_EXIT_VMPTRST:
11856 case VMX_EXIT_VMREAD:
11857 case VMX_EXIT_VMRESUME:
11858 case VMX_EXIT_VMWRITE:
11859 case VMX_EXIT_VMXOFF:
11860 case VMX_EXIT_VMXON:
11861 case VMX_EXIT_MOV_CRX:
11862 case VMX_EXIT_MOV_DRX:
11863 case VMX_EXIT_IO_INSTR:
11864 case VMX_EXIT_RDMSR:
11865 case VMX_EXIT_WRMSR:
11866 case VMX_EXIT_MWAIT:
11867 case VMX_EXIT_MONITOR:
11868 case VMX_EXIT_PAUSE:
11869 case VMX_EXIT_GDTR_IDTR_ACCESS:
11870 case VMX_EXIT_LDTR_TR_ACCESS:
11871 case VMX_EXIT_INVEPT:
11872 case VMX_EXIT_RDTSCP:
11873 case VMX_EXIT_INVVPID:
11874 case VMX_EXIT_WBINVD:
11875 case VMX_EXIT_XSETBV:
11876 case VMX_EXIT_RDRAND:
11877 case VMX_EXIT_INVPCID:
11878 case VMX_EXIT_VMFUNC:
11879 case VMX_EXIT_RDSEED:
11880 case VMX_EXIT_XSAVES:
11881 case VMX_EXIT_XRSTORS:
11882 {
11883 int rc = vmxHCImportGuestState<CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
11884 AssertRCReturn(rc, rc);
11885 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11886 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11887 return VINF_EM_DBG_STEPPED;
11888 break;
11889 }
11890
11891 /* Errors and unexpected events: */
11892 case VMX_EXIT_INIT_SIGNAL:
11893 case VMX_EXIT_SIPI:
11894 case VMX_EXIT_IO_SMI:
11895 case VMX_EXIT_SMI:
11896 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11897 case VMX_EXIT_ERR_MSR_LOAD:
11898 case VMX_EXIT_ERR_MACHINE_CHECK:
11899 case VMX_EXIT_PML_FULL:
11900 case VMX_EXIT_VIRTUALIZED_EOI:
11901 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
11902 break;
11903
11904 default:
11905 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11906 break;
11907 }
11908 }
11909
11910 /*
11911 * Check for debugger event breakpoints and dtrace probes.
11912 */
11913 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11914 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11915 {
11916 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11917 if (rcStrict != VINF_SUCCESS)
11918 return rcStrict;
11919 }
11920
11921 /*
11922 * Normal processing.
11923 */
11924#ifdef HMVMX_USE_FUNCTION_TABLE
11925 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11926#else
11927 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11928#endif
11929}
11930
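/*
 * Editor's sketch (assumption, not from the original source): a debug run loop that
 * uses the filter above would typically do something like the following after each
 * VM-exit, with pVmxTransient and pDbgState being the structures set up for the
 * debug run:
 *
 *     VBOXSTRICTRC rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, pDbgState);
 *     if (rcStrict != VINF_SUCCESS)
 *         break;   // e.g. VINF_EM_DBG_STEPPED, a DBGF event status or an error
 *
 * where anything other than VINF_SUCCESS would be propagated to the caller of the
 * run loop.
 */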
11931/** @} */