VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@93748

Last change on this file was 93748, checked in by vboxsync, 3 years ago

VMM/{NEMR3Native-darwin.cpp,HMVMXR0.cpp,VMXAllTemplate.cpp.h}: Move some of the debug loop helpers to the all context template in order to be able to use it for the macOS NEM backend to enable some rudimentary VBox debugger support (breakpoints, etc.), bugref:9044 and bugref:10136

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 485.2 KB
1/* $Id: VMXAllTemplate.cpp.h 93748 2022-02-15 12:20:46Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/**
42 * Flags to skip redundant reads of some common VMCS fields that are not part of
43 * the guest-CPU or VCPU state but are needed while handling VM-exits.
44 */
45#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
46#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
47#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
48#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
49#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
50#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
51#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
52#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
53#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
54#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
55
56/** All the VMCS fields required for processing of exception/NMI VM-exits. */
57#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
58 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
59 | HMVMX_READ_EXIT_INSTR_LEN \
60 | HMVMX_READ_IDT_VECTORING_INFO \
61 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
62
63/** Assert that all the given fields have been read from the VMCS. */
64#ifdef VBOX_STRICT
65# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
66 do { \
67 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
68 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
69 } while (0)
70#else
71# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
72#endif
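/**
 * Usage sketch for the HMVMX_READ_XXX flags and HMVMX_ASSERT_READ. The handler
 * body below is a hypothetical example; the read helpers are the
 * vmxHCReadXxxVmcs functions defined further down in this file, each of which
 * sets the corresponding flag in fVmcsFieldsRead:
 * @code
 *     vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);     /* sets HMVMX_READ_EXIT_QUALIFICATION */
 *     vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient); /* sets HMVMX_READ_EXIT_INSTR_LEN */
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 * @endcode
 */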
73
74/**
75 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
76 * guest using hardware-assisted VMX.
77 *
78 * This excludes state like GPRs (other than RSP) which are always
79 * swapped and restored across the world-switch, and also registers like the
80 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
81 */
82#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
83 | CPUMCTX_EXTRN_RFLAGS \
84 | CPUMCTX_EXTRN_RSP \
85 | CPUMCTX_EXTRN_SREG_MASK \
86 | CPUMCTX_EXTRN_TABLE_MASK \
87 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
88 | CPUMCTX_EXTRN_SYSCALL_MSRS \
89 | CPUMCTX_EXTRN_SYSENTER_MSRS \
90 | CPUMCTX_EXTRN_TSC_AUX \
91 | CPUMCTX_EXTRN_OTHER_MSRS \
92 | CPUMCTX_EXTRN_CR0 \
93 | CPUMCTX_EXTRN_CR3 \
94 | CPUMCTX_EXTRN_CR4 \
95 | CPUMCTX_EXTRN_DR7 \
96 | CPUMCTX_EXTRN_HWVIRT \
97 | CPUMCTX_EXTRN_INHIBIT_INT \
98 | CPUMCTX_EXTRN_INHIBIT_NMI)
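/**
 * Usage sketch: importing the full subset above from the VMCS into the
 * guest-CPU context. vmxHCImportGuestState is prototyped below; pVmcsInfo is
 * assumed to be the currently active VMCS info. object:
 * @code
 *     int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
 *     AssertRCReturn(rc, rc);
 * @endcode
 */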
99
100/**
101 * Exception bitmap mask for real-mode guests (real-on-v86).
102 *
103 * We need to intercept all exceptions manually except:
104 * - \#AC and \#DB, which are always intercepted anyway (to prevent the CPU from
105 * deadlocking due to bugs in Intel CPUs) and hence are not part of this mask.
106 * - \#PF, which need not be intercepted even in real-mode if we have nested
107 * paging support.
108 */
109#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
110 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
111 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
112 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
113 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
114 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
115 | RT_BIT(X86_XCPT_XF))
116
117/** Maximum VM-instruction error number. */
118#define HMVMX_INSTR_ERROR_MAX 28
119
120/** Profiling macro. */
121#ifdef HM_PROFILE_EXIT_DISPATCH
122# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
123# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
124#else
125# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
126# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
127#endif
128
129#ifndef IN_NEM_DARWIN
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
140#else
141# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
142# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
143#endif
144
145/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
146 * context. */
147#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
148 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
149 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
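/**
 * Usage sketch: a VM-exit handler that reads CR0 from the guest context would
 * typically assert that the corresponding CPUMCTX_EXTRN_XXX bit has been
 * imported first (hypothetical snippet):
 * @code
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
 * @endcode
 */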
150
151/** Log the VM-exit reason with an easily visible marker to identify it in a
152 * potential sea of logging data. */
153#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
154 do { \
155 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
156 HMGetVmxExitName(a_uExitReason))); \
157 } while (0) \
158
159
160/*********************************************************************************************************************************
161* Structures and Typedefs *
162*********************************************************************************************************************************/
163/**
164 * Memory operand read or write access.
165 */
166typedef enum VMXMEMACCESS
167{
168 VMXMEMACCESS_READ = 0,
169 VMXMEMACCESS_WRITE = 1
170} VMXMEMACCESS;
171
172
173/**
174 * VMX VM-exit handler.
175 *
176 * @returns Strict VBox status code (i.e. informational status codes too).
177 * @param pVCpu The cross context virtual CPU structure.
178 * @param pVmxTransient The VMX-transient structure.
179 */
180#ifndef HMVMX_USE_FUNCTION_TABLE
181typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
182#else
183typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
184/** Pointer to VM-exit handler. */
185typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
186#endif
187
188/**
189 * VMX VM-exit handler, non-strict status code.
190 *
191 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
192 *
193 * @returns VBox status code, no informational status code returned.
194 * @param pVCpu The cross context virtual CPU structure.
195 * @param pVmxTransient The VMX-transient structure.
196 *
197 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
198 * use of that status code will be replaced with VINF_EM_SOMETHING
199 * later when switching over to IEM.
200 */
201#ifndef HMVMX_USE_FUNCTION_TABLE
202typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203#else
204typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
205#endif
206
207
208/*********************************************************************************************************************************
209* Internal Functions *
210*********************************************************************************************************************************/
211#ifndef HMVMX_USE_FUNCTION_TABLE
212DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
213# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
214# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
215#else
216# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
217# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
218#endif
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
221#endif
222
223static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
224
225/** @name VM-exit handler prototypes.
226 * @{
227 */
228static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
229static FNVMXEXITHANDLER vmxHCExitExtInt;
230static FNVMXEXITHANDLER vmxHCExitTripleFault;
231static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
232static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
233static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
234static FNVMXEXITHANDLER vmxHCExitCpuid;
235static FNVMXEXITHANDLER vmxHCExitGetsec;
236static FNVMXEXITHANDLER vmxHCExitHlt;
237static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
238static FNVMXEXITHANDLER vmxHCExitInvlpg;
239static FNVMXEXITHANDLER vmxHCExitRdpmc;
240static FNVMXEXITHANDLER vmxHCExitVmcall;
241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
242static FNVMXEXITHANDLER vmxHCExitVmclear;
243static FNVMXEXITHANDLER vmxHCExitVmlaunch;
244static FNVMXEXITHANDLER vmxHCExitVmptrld;
245static FNVMXEXITHANDLER vmxHCExitVmptrst;
246static FNVMXEXITHANDLER vmxHCExitVmread;
247static FNVMXEXITHANDLER vmxHCExitVmresume;
248static FNVMXEXITHANDLER vmxHCExitVmwrite;
249static FNVMXEXITHANDLER vmxHCExitVmxoff;
250static FNVMXEXITHANDLER vmxHCExitVmxon;
251static FNVMXEXITHANDLER vmxHCExitInvvpid;
252# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
253static FNVMXEXITHANDLER vmxHCExitInvept;
254# endif
255#endif
256static FNVMXEXITHANDLER vmxHCExitRdtsc;
257static FNVMXEXITHANDLER vmxHCExitMovCRx;
258static FNVMXEXITHANDLER vmxHCExitMovDRx;
259static FNVMXEXITHANDLER vmxHCExitIoInstr;
260static FNVMXEXITHANDLER vmxHCExitRdmsr;
261static FNVMXEXITHANDLER vmxHCExitWrmsr;
262static FNVMXEXITHANDLER vmxHCExitMwait;
263static FNVMXEXITHANDLER vmxHCExitMtf;
264static FNVMXEXITHANDLER vmxHCExitMonitor;
265static FNVMXEXITHANDLER vmxHCExitPause;
266static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
267static FNVMXEXITHANDLER vmxHCExitApicAccess;
268static FNVMXEXITHANDLER vmxHCExitEptViolation;
269static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
270static FNVMXEXITHANDLER vmxHCExitRdtscp;
271static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
272static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
273static FNVMXEXITHANDLER vmxHCExitXsetbv;
274static FNVMXEXITHANDLER vmxHCExitInvpcid;
275static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
276static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
277static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
278/** @} */
279
280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
281/** @name Nested-guest VM-exit handler prototypes.
282 * @{
283 */
284static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
285static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
287static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
288static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
289static FNVMXEXITHANDLER vmxHCExitHltNested;
290static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
291static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
292static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
293static FNVMXEXITHANDLER vmxHCExitRdtscNested;
294static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
295static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
296static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
297static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
298static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
299static FNVMXEXITHANDLER vmxHCExitMwaitNested;
300static FNVMXEXITHANDLER vmxHCExitMtfNested;
301static FNVMXEXITHANDLER vmxHCExitMonitorNested;
302static FNVMXEXITHANDLER vmxHCExitPauseNested;
303static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
304static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
305static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
306static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
307static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
308static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
309static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
310static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
311static FNVMXEXITHANDLER vmxHCExitInstrNested;
312static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
313/** @} */
314#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
315
316
317/*********************************************************************************************************************************
318* Global Variables *
319*********************************************************************************************************************************/
320#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
321/**
322 * Array of all VMCS fields.
323 * Any fields added to the VT-x spec. should be added here.
324 *
325 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
326 * of nested-guests.
327 */
328static const uint32_t g_aVmcsFields[] =
329{
330 /* 16-bit control fields. */
331 VMX_VMCS16_VPID,
332 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
333 VMX_VMCS16_EPTP_INDEX,
334
335 /* 16-bit guest-state fields. */
336 VMX_VMCS16_GUEST_ES_SEL,
337 VMX_VMCS16_GUEST_CS_SEL,
338 VMX_VMCS16_GUEST_SS_SEL,
339 VMX_VMCS16_GUEST_DS_SEL,
340 VMX_VMCS16_GUEST_FS_SEL,
341 VMX_VMCS16_GUEST_GS_SEL,
342 VMX_VMCS16_GUEST_LDTR_SEL,
343 VMX_VMCS16_GUEST_TR_SEL,
344 VMX_VMCS16_GUEST_INTR_STATUS,
345 VMX_VMCS16_GUEST_PML_INDEX,
346
347 /* 16-bit host-state fields. */
348 VMX_VMCS16_HOST_ES_SEL,
349 VMX_VMCS16_HOST_CS_SEL,
350 VMX_VMCS16_HOST_SS_SEL,
351 VMX_VMCS16_HOST_DS_SEL,
352 VMX_VMCS16_HOST_FS_SEL,
353 VMX_VMCS16_HOST_GS_SEL,
354 VMX_VMCS16_HOST_TR_SEL,
355
356 /* 64-bit control fields. */
357 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
358 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
359 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
360 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
361 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
362 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
363 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
364 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
365 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
366 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
367 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
368 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
369 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
370 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
371 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
372 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
373 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
374 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
375 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
376 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
377 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
378 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
379 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
380 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
381 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
382 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
383 VMX_VMCS64_CTRL_EPTP_FULL,
384 VMX_VMCS64_CTRL_EPTP_HIGH,
385 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
386 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
387 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
388 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
389 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
390 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
391 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
392 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
393 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
394 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
395 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
396 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
397 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
398 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
399 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
400 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
401 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
402 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
403 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
404 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
405 VMX_VMCS64_CTRL_SPPTP_FULL,
406 VMX_VMCS64_CTRL_SPPTP_HIGH,
407 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
408 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
409 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
410 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
411 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
412 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
413
414 /* 64-bit read-only data fields. */
415 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
416 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
417
418 /* 64-bit guest-state fields. */
419 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
420 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
421 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
422 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
423 VMX_VMCS64_GUEST_PAT_FULL,
424 VMX_VMCS64_GUEST_PAT_HIGH,
425 VMX_VMCS64_GUEST_EFER_FULL,
426 VMX_VMCS64_GUEST_EFER_HIGH,
427 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
428 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
429 VMX_VMCS64_GUEST_PDPTE0_FULL,
430 VMX_VMCS64_GUEST_PDPTE0_HIGH,
431 VMX_VMCS64_GUEST_PDPTE1_FULL,
432 VMX_VMCS64_GUEST_PDPTE1_HIGH,
433 VMX_VMCS64_GUEST_PDPTE2_FULL,
434 VMX_VMCS64_GUEST_PDPTE2_HIGH,
435 VMX_VMCS64_GUEST_PDPTE3_FULL,
436 VMX_VMCS64_GUEST_PDPTE3_HIGH,
437 VMX_VMCS64_GUEST_BNDCFGS_FULL,
438 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
439 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
440 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
441 VMX_VMCS64_GUEST_PKRS_FULL,
442 VMX_VMCS64_GUEST_PKRS_HIGH,
443
444 /* 64-bit host-state fields. */
445 VMX_VMCS64_HOST_PAT_FULL,
446 VMX_VMCS64_HOST_PAT_HIGH,
447 VMX_VMCS64_HOST_EFER_FULL,
448 VMX_VMCS64_HOST_EFER_HIGH,
449 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
450 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
451 VMX_VMCS64_HOST_PKRS_FULL,
452 VMX_VMCS64_HOST_PKRS_HIGH,
453
454 /* 32-bit control fields. */
455 VMX_VMCS32_CTRL_PIN_EXEC,
456 VMX_VMCS32_CTRL_PROC_EXEC,
457 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
458 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
459 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
460 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
461 VMX_VMCS32_CTRL_EXIT,
462 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
463 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
464 VMX_VMCS32_CTRL_ENTRY,
465 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
466 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
467 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
468 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
469 VMX_VMCS32_CTRL_TPR_THRESHOLD,
470 VMX_VMCS32_CTRL_PROC_EXEC2,
471 VMX_VMCS32_CTRL_PLE_GAP,
472 VMX_VMCS32_CTRL_PLE_WINDOW,
473
474 /* 32-bit read-only data fields. */
475 VMX_VMCS32_RO_VM_INSTR_ERROR,
476 VMX_VMCS32_RO_EXIT_REASON,
477 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
478 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
479 VMX_VMCS32_RO_IDT_VECTORING_INFO,
480 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
481 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
482 VMX_VMCS32_RO_EXIT_INSTR_INFO,
483
484 /* 32-bit guest-state fields. */
485 VMX_VMCS32_GUEST_ES_LIMIT,
486 VMX_VMCS32_GUEST_CS_LIMIT,
487 VMX_VMCS32_GUEST_SS_LIMIT,
488 VMX_VMCS32_GUEST_DS_LIMIT,
489 VMX_VMCS32_GUEST_FS_LIMIT,
490 VMX_VMCS32_GUEST_GS_LIMIT,
491 VMX_VMCS32_GUEST_LDTR_LIMIT,
492 VMX_VMCS32_GUEST_TR_LIMIT,
493 VMX_VMCS32_GUEST_GDTR_LIMIT,
494 VMX_VMCS32_GUEST_IDTR_LIMIT,
495 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_INT_STATE,
504 VMX_VMCS32_GUEST_ACTIVITY_STATE,
505 VMX_VMCS32_GUEST_SMBASE,
506 VMX_VMCS32_GUEST_SYSENTER_CS,
507 VMX_VMCS32_PREEMPT_TIMER_VALUE,
508
509 /* 32-bit host-state fields. */
510 VMX_VMCS32_HOST_SYSENTER_CS,
511
512 /* Natural-width control fields. */
513 VMX_VMCS_CTRL_CR0_MASK,
514 VMX_VMCS_CTRL_CR4_MASK,
515 VMX_VMCS_CTRL_CR0_READ_SHADOW,
516 VMX_VMCS_CTRL_CR4_READ_SHADOW,
517 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
518 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
519 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
520 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
521
522 /* Natural-width read-only data fields. */
523 VMX_VMCS_RO_EXIT_QUALIFICATION,
524 VMX_VMCS_RO_IO_RCX,
525 VMX_VMCS_RO_IO_RSI,
526 VMX_VMCS_RO_IO_RDI,
527 VMX_VMCS_RO_IO_RIP,
528 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
529
530 /* Natural-width guest-state fields. */
531 VMX_VMCS_GUEST_CR0,
532 VMX_VMCS_GUEST_CR3,
533 VMX_VMCS_GUEST_CR4,
534 VMX_VMCS_GUEST_ES_BASE,
535 VMX_VMCS_GUEST_CS_BASE,
536 VMX_VMCS_GUEST_SS_BASE,
537 VMX_VMCS_GUEST_DS_BASE,
538 VMX_VMCS_GUEST_FS_BASE,
539 VMX_VMCS_GUEST_GS_BASE,
540 VMX_VMCS_GUEST_LDTR_BASE,
541 VMX_VMCS_GUEST_TR_BASE,
542 VMX_VMCS_GUEST_GDTR_BASE,
543 VMX_VMCS_GUEST_IDTR_BASE,
544 VMX_VMCS_GUEST_DR7,
545 VMX_VMCS_GUEST_RSP,
546 VMX_VMCS_GUEST_RIP,
547 VMX_VMCS_GUEST_RFLAGS,
548 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
549 VMX_VMCS_GUEST_SYSENTER_ESP,
550 VMX_VMCS_GUEST_SYSENTER_EIP,
551 VMX_VMCS_GUEST_S_CET,
552 VMX_VMCS_GUEST_SSP,
553 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
554
555 /* Natural-width host-state fields */
556 VMX_VMCS_HOST_CR0,
557 VMX_VMCS_HOST_CR3,
558 VMX_VMCS_HOST_CR4,
559 VMX_VMCS_HOST_FS_BASE,
560 VMX_VMCS_HOST_GS_BASE,
561 VMX_VMCS_HOST_TR_BASE,
562 VMX_VMCS_HOST_GDTR_BASE,
563 VMX_VMCS_HOST_IDTR_BASE,
564 VMX_VMCS_HOST_SYSENTER_ESP,
565 VMX_VMCS_HOST_SYSENTER_EIP,
566 VMX_VMCS_HOST_RSP,
567 VMX_VMCS_HOST_RIP,
568 VMX_VMCS_HOST_S_CET,
569 VMX_VMCS_HOST_SSP,
570 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
571};
572#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
573
574#ifdef VBOX_STRICT
575static const uint32_t g_aVmcsSegBase[] =
576{
577 VMX_VMCS_GUEST_ES_BASE,
578 VMX_VMCS_GUEST_CS_BASE,
579 VMX_VMCS_GUEST_SS_BASE,
580 VMX_VMCS_GUEST_DS_BASE,
581 VMX_VMCS_GUEST_FS_BASE,
582 VMX_VMCS_GUEST_GS_BASE
583};
584static const uint32_t g_aVmcsSegSel[] =
585{
586 VMX_VMCS16_GUEST_ES_SEL,
587 VMX_VMCS16_GUEST_CS_SEL,
588 VMX_VMCS16_GUEST_SS_SEL,
589 VMX_VMCS16_GUEST_DS_SEL,
590 VMX_VMCS16_GUEST_FS_SEL,
591 VMX_VMCS16_GUEST_GS_SEL
592};
593static const uint32_t g_aVmcsSegLimit[] =
594{
595 VMX_VMCS32_GUEST_ES_LIMIT,
596 VMX_VMCS32_GUEST_CS_LIMIT,
597 VMX_VMCS32_GUEST_SS_LIMIT,
598 VMX_VMCS32_GUEST_DS_LIMIT,
599 VMX_VMCS32_GUEST_FS_LIMIT,
600 VMX_VMCS32_GUEST_GS_LIMIT
601};
602static const uint32_t g_aVmcsSegAttr[] =
603{
604 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
605 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
606 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
607 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
608 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
609 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
610};
611AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
612AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
613AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
614AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
615#endif /* VBOX_STRICT */
616
617#ifdef HMVMX_USE_FUNCTION_TABLE
618/**
619 * VMX_EXIT dispatch table.
620 */
621static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
622{
623 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
624 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
625 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
626 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
627 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
628 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
629 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
630 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
631 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
632 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
633 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
634 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
635 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
636 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
637 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
638 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
639 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
640 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
641 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
642#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
643 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
644 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
645 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
646 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
647 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
648 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
649 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
650 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
651 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
652#else
653 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
654 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
655 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
656 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
657 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
658 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
659 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
660 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
661 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
662#endif
663 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
664 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
665 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
666 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
667 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
668 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
669 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
670 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
671 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
672 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
673 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
674 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
675 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
676 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
677 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
678 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
679 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
680 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
681 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
682 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
683 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
684 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
685#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
686 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
687#else
688 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
689#endif
690 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
691 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
692#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
693 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
694#else
695 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
696#endif
697 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
698 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
699 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
700 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
701 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
702 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
703 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
704 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
705 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
706 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
707 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
708 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
709 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
710 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
711 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
712 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
713};
714#endif /* HMVMX_USE_FUNCTION_TABLE */
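/**
 * Dispatch sketch: when HMVMX_USE_FUNCTION_TABLE is defined, the VM-exit
 * dispatcher essentially indexes the table above with the exit reason. This is
 * a simplified illustration; the actual dispatch helper adds further checks:
 * @code
 *     uint32_t const uExitReason = pVmxTransient->uExitReason;
 *     Assert(uExitReason <= VMX_EXIT_MAX);
 *     return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 * @endcode
 */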
715
716#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
717static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
718{
719 /* 0 */ "(Not Used)",
720 /* 1 */ "VMCALL executed in VMX root operation.",
721 /* 2 */ "VMCLEAR with invalid physical address.",
722 /* 3 */ "VMCLEAR with VMXON pointer.",
723 /* 4 */ "VMLAUNCH with non-clear VMCS.",
724 /* 5 */ "VMRESUME with non-launched VMCS.",
725 /* 6 */ "VMRESUME after VMXOFF",
726 /* 7 */ "VM-entry with invalid control fields.",
727 /* 8 */ "VM-entry with invalid host state fields.",
728 /* 9 */ "VMPTRLD with invalid physical address.",
729 /* 10 */ "VMPTRLD with VMXON pointer.",
730 /* 11 */ "VMPTRLD with incorrect revision identifier.",
731 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
732 /* 13 */ "VMWRITE to read-only VMCS component.",
733 /* 14 */ "(Not Used)",
734 /* 15 */ "VMXON executed in VMX root operation.",
735 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
736 /* 17 */ "VM-entry with non-launched executing VMCS.",
737 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
738 /* 19 */ "VMCALL with non-clear VMCS.",
739 /* 20 */ "VMCALL with invalid VM-exit control fields.",
740 /* 21 */ "(Not Used)",
741 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
742 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
743 /* 24 */ "VMCALL with invalid SMM-monitor features.",
744 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
745 /* 26 */ "VM-entry with events blocked by MOV SS.",
746 /* 27 */ "(Not Used)",
747 /* 28 */ "Invalid operand to INVEPT/INVVPID."
748};
749#endif /* VBOX_STRICT && LOG_ENABLED */
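/**
 * Logging sketch: the table above is meant to be indexed with the value of
 * VMX_VMCS32_RO_VM_INSTR_ERROR, bounds-checked against HMVMX_INSTR_ERROR_MAX
 * (hypothetical snippet for strict + logging builds):
 * @code
 *     uint32_t uInstrError;
 *     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
 *     AssertRC(rc);
 *     if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
 *         Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
 * @endcode
 */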
750
751
752/**
753 * Gets the CR0 guest/host mask.
754 *
755 * These bits typically do not change throughout the lifetime of a VM. Any bit set in
756 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
757 * by the guest.
758 *
759 * @returns The CR0 guest/host mask.
760 * @param pVCpu The cross context virtual CPU structure.
761 */
762static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
763{
764 /*
765 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
766 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
767 *
768 * Furthermore, modifications to any bits that are reserved/unspecified currently
769 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
770 * when future CPUs specify and use currently reserved/unspecified bits.
771 */
772 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
773 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
774 * and @bugref{6944}. */
775 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
776 return ( X86_CR0_PE
777 | X86_CR0_NE
778 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
779 | X86_CR0_PG
780 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
781}
782
783
784/**
785 * Gets the CR4 guest/host mask.
786 *
787 * These bits typically do not change throughout the lifetime of a VM. Any bit set in
788 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
789 * by the guest.
790 *
791 * @returns The CR4 guest/host mask.
792 * @param pVCpu The cross context virtual CPU structure.
793 */
794static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
795{
796 /*
797 * We construct a mask of all CR4 bits that the guest can modify without causing
798 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
799 * a VM-exit when the guest attempts to modify them when executing using
800 * hardware-assisted VMX.
801 *
802 * When a feature is not exposed to the guest (and may be present on the host),
803 * we want to intercept guest modifications to the bit so we can emulate proper
804 * behavior (e.g., #GP).
805 *
806 * Furthermore, only modifications to those bits that don't require immediate
807 * emulation are allowed. For example, PCIDE is excluded because the behavior
808 * depends on CR3 which might not always be the guest value while executing
809 * using hardware-assisted VMX.
810 */
811 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
812 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
813 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
814 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
815
816 /*
817 * Paranoia.
818 * Ensure features exposed to the guest are present on the host.
819 */
820 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
821 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
822 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
823
824 uint64_t const fGstMask = ( X86_CR4_PVI
825 | X86_CR4_TSD
826 | X86_CR4_DE
827 | X86_CR4_MCE
828 | X86_CR4_PCE
829 | X86_CR4_OSXMMEEXCPT
830 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
831 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
832 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
833 return ~fGstMask;
834}
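/**
 * Commit sketch: the masks returned by vmxHCGetFixedCr0Mask and
 * vmxHCGetFixedCr4Mask end up in the CR0/CR4 guest/host mask VMCS fields,
 * roughly like this (assuming a natural-width VMX_VMCS_WRITE_NW wrapper is
 * available, matching the read/write wrappers required at the top of this file):
 * @code
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, vmxHCGetFixedCr0Mask(pVCpu));
 *     rc    |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, vmxHCGetFixedCr4Mask(pVCpu));
 *     AssertRC(rc);
 * @endcode
 */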
835
836
837/**
838 * Adds one or more exceptions to the exception bitmap and commits it to the current
839 * VMCS.
840 *
841 * @param pVCpu The cross context virtual CPU structure.
842 * @param pVmxTransient The VMX-transient structure.
843 * @param uXcptMask The exception(s) to add.
844 */
845static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
846{
847 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
848 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
849 if ((uXcptBitmap & uXcptMask) != uXcptMask)
850 {
851 uXcptBitmap |= uXcptMask;
852 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
853 AssertRC(rc);
854 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
855 }
856}
857
858
859/**
860 * Adds an exception to the exception bitmap and commits it to the current VMCS.
861 *
862 * @param pVCpu The cross context virtual CPU structure.
863 * @param pVmxTransient The VMX-transient structure.
864 * @param uXcpt The exception to add.
865 */
866static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
867{
868 Assert(uXcpt <= X86_XCPT_LAST);
869 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
870}
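/**
 * Usage sketch: intercepting and later un-intercepting a single exception,
 * e.g. \#GP, using the helper above and the removal helpers that follow
 * (hypothetical call sites):
 * @code
 *     vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     ...
 *     int rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 *     AssertRC(rc);
 * @endcode
 */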
871
872
873/**
874 * Removes one or more exceptions from the exception bitmap and commits it to the
875 * current VMCS.
876 *
877 * This takes care of not removing the exception intercept if a nested-guest
878 * requires the exception to be intercepted.
879 *
880 * @returns VBox status code.
881 * @param pVCpu The cross context virtual CPU structure.
882 * @param pVmxTransient The VMX-transient structure.
883 * @param uXcptMask The exception(s) to remove.
884 */
885static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
886{
887 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
888 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
889 if (u32XcptBitmap & uXcptMask)
890 {
891#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
892 if (!pVmxTransient->fIsNestedGuest)
893 { /* likely */ }
894 else
895 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
896#endif
897#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
898 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
899 | RT_BIT(X86_XCPT_DE)
900 | RT_BIT(X86_XCPT_NM)
901 | RT_BIT(X86_XCPT_TS)
902 | RT_BIT(X86_XCPT_UD)
903 | RT_BIT(X86_XCPT_NP)
904 | RT_BIT(X86_XCPT_SS)
905 | RT_BIT(X86_XCPT_GP)
906 | RT_BIT(X86_XCPT_PF)
907 | RT_BIT(X86_XCPT_MF));
908#elif defined(HMVMX_ALWAYS_TRAP_PF)
909 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
910#endif
911 if (uXcptMask)
912 {
913 /* Validate we are not removing any essential exception intercepts. */
914#ifndef IN_NEM_DARWIN
915 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
916#else
917 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
918#endif
919 NOREF(pVCpu);
920 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
922
923 /* Remove it from the exception bitmap. */
924 u32XcptBitmap &= ~uXcptMask;
925
926 /* Commit and update the cache if necessary. */
927 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
928 {
929 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
930 AssertRC(rc);
931 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
932 }
933 }
934 }
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * Removes an exception from the exception bitmap and commits it to the current
941 * VMCS.
942 *
943 * @returns VBox status code.
944 * @param pVCpu The cross context virtual CPU structure.
945 * @param pVmxTransient The VMX-transient structure.
946 * @param uXcpt The exception to remove.
947 */
948static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
949{
950 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
951}
952
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
955/**
956 * Loads the shadow VMCS specified by the VMCS info. object.
957 *
958 * @returns VBox status code.
959 * @param pVmcsInfo The VMCS info. object.
960 *
961 * @remarks Can be called with interrupts disabled.
962 */
963static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
964{
965 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
966 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
967
968 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
969 if (RT_SUCCESS(rc))
970 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
971 return rc;
972}
973
974
975/**
976 * Clears the shadow VMCS specified by the VMCS info. object.
977 *
978 * @returns VBox status code.
979 * @param pVmcsInfo The VMCS info. object.
980 *
981 * @remarks Can be called with interrupts disabled.
982 */
983static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
984{
985 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
986 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
987
988 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
989 if (RT_SUCCESS(rc))
990 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
991 return rc;
992}
993
994
995/**
996 * Switches from and to the specified VMCSes.
997 *
998 * @returns VBox status code.
999 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1000 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1001 *
1002 * @remarks Called with interrupts disabled.
1003 */
1004static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1005{
1006 /*
1007 * Clear the VMCS we are switching out if it has not already been cleared.
1008 * This will sync any CPU internal data back to the VMCS.
1009 */
1010 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1011 {
1012 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1013 if (RT_SUCCESS(rc))
1014 {
1015 /*
1016 * The shadow VMCS, if any, would not be active at this point since we
1017 * would have cleared it while importing the virtual hardware-virtualization
1018 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1019 * clear the shadow VMCS here, just assert for safety.
1020 */
1021 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1022 }
1023 else
1024 return rc;
1025 }
1026
1027 /*
1028 * Clear the VMCS we are switching to if it has not already been cleared.
1029 * This will initialize the VMCS launch state to "clear", which is required for loading it.
1030 *
1031 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1032 */
1033 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1034 {
1035 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1036 if (RT_SUCCESS(rc))
1037 { /* likely */ }
1038 else
1039 return rc;
1040 }
1041
1042 /*
1043 * Finally, load the VMCS we are switching to.
1044 */
1045 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1046}
1047
1048
1049/**
1050 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1051 * caller.
1052 *
1053 * @returns VBox status code.
1054 * @param pVCpu The cross context virtual CPU structure.
1055 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1056 * true) or guest VMCS (pass false).
1057 */
1058static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1059{
1060 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1061 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1062
1063 PVMXVMCSINFO pVmcsInfoFrom;
1064 PVMXVMCSINFO pVmcsInfoTo;
1065 if (fSwitchToNstGstVmcs)
1066 {
1067 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1068 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1069 }
1070 else
1071 {
1072 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1073 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1074 }
1075
1076 /*
1077 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1078 * preemption hook code path acquires the current VMCS.
1079 */
1080 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1081
1082 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1083 if (RT_SUCCESS(rc))
1084 {
1085 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1086 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1087
1088 /*
1089 * If we are switching to a VMCS that was executed on a different host CPU or was
1090 * never executed before, flag that we need to export the host state before executing
1091 * guest/nested-guest code using hardware-assisted VMX.
1092 *
1093 * This could probably be done in a preemptible context since the preemption hook
1094 * will flag the necessary change in host context. However, since preemption is
1095 * already disabled and to avoid making assumptions about host-specific code in
1096 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1097 * disabled.
1098 */
1099 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1100 { /* likely */ }
1101 else
1102 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1103
1104 ASMSetFlags(fEFlags);
1105
1106 /*
1107 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1108 * flag that we need to update the host MSR values there. Even if we decide in the
1109 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1110 * if its content differs, we would have to update the host MSRs anyway.
1111 */
1112 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1113 }
1114 else
1115 ASMSetFlags(fEFlags);
1116 return rc;
1117}
1118#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1119
1120
1121#ifdef VBOX_STRICT
1122/**
1123 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1124 * transient structure.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param pVmxTransient The VMX-transient structure.
1128 */
1129DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1130{
1131 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1132 AssertRC(rc);
1133}
1134
1135
1136/**
1137 * Reads the VM-entry exception error code field from the VMCS into
1138 * the VMX transient structure.
1139 *
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pVmxTransient The VMX-transient structure.
1142 */
1143DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1144{
1145 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1146 AssertRC(rc);
1147}
1148
1149
1150/**
1151 * Reads the VM-entry instruction length field from the VMCS into
1152 * the VMX transient structure.
1153 *
1154 * @param pVCpu The cross context virtual CPU structure.
1155 * @param pVmxTransient The VMX-transient structure.
1156 */
1157DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1158{
1159 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1160 AssertRC(rc);
1161}
1162#endif /* VBOX_STRICT */
1163
1164
1165/**
1166 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1167 * transient structure.
1168 *
1169 * @param pVCpu The cross context virtual CPU structure.
1170 * @param pVmxTransient The VMX-transient structure.
1171 */
1172DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1173{
1174 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1175 {
1176 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1177 AssertRC(rc);
1178 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1179 }
1180}
1181
1182
1183/**
1184 * Reads the VM-exit interruption error code from the VMCS into the VMX
1185 * transient structure.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 * @param pVmxTransient The VMX-transient structure.
1189 */
1190DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1191{
1192 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1193 {
1194 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1195 AssertRC(rc);
1196 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1197 }
1198}
1199
1200
1201/**
1202 * Reads the VM-exit instruction length field from the VMCS into the VMX
1203 * transient structure.
1204 *
1205 * @param pVCpu The cross context virtual CPU structure.
1206 * @param pVmxTransient The VMX-transient structure.
1207 */
1208DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1209{
1210 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1211 {
1212 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1213 AssertRC(rc);
1214 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1215 }
1216}
1217
1218
1219/**
1220 * Reads the VM-exit instruction-information field from the VMCS into
1221 * the VMX transient structure.
1222 *
1223 * @param pVCpu The cross context virtual CPU structure.
1224 * @param pVmxTransient The VMX-transient structure.
1225 */
1226DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1227{
1228 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1229 {
1230 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1231 AssertRC(rc);
1232 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1233 }
1234}
1235
1236
1237/**
1238 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1239 *
1240 * @param pVCpu The cross context virtual CPU structure.
1241 * @param pVmxTransient The VMX-transient structure.
1242 */
1243DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1244{
1245 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1246 {
1247 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1248 AssertRC(rc);
1249 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1250 }
1251}
1252
1253
1254/**
1255 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1256 *
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param pVmxTransient The VMX-transient structure.
1259 */
1260DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1261{
1262 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1263 {
1264 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1265 AssertRC(rc);
1266 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1267 }
1268}
1269
1270
1271/**
1272 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure.
1275 * @param pVmxTransient The VMX-transient structure.
1276 */
1277DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1278{
1279 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1280 {
1281 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1282 AssertRC(rc);
1283 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1284 }
1285}
1286
1287#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1288/**
1289 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1290 * structure.
1291 *
1292 * @param pVCpu The cross context virtual CPU structure.
1293 * @param pVmxTransient The VMX-transient structure.
1294 */
1295DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1296{
1297 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1298 {
1299 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1300 AssertRC(rc);
1301 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1302 }
1303}
1304#endif
1305
1306/**
1307 * Reads the IDT-vectoring information field from the VMCS into the VMX
1308 * transient structure.
1309 *
1310 * @param pVCpu The cross context virtual CPU structure.
1311 * @param pVmxTransient The VMX-transient structure.
1312 *
1313 * @remarks No-long-jump zone!!!
1314 */
1315DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1316{
1317 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1318 {
1319 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1320 AssertRC(rc);
1321 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1322 }
1323}
1324
1325
1326/**
1327 * Reads the IDT-vectoring error code from the VMCS into the VMX
1328 * transient structure.
1329 *
1330 * @param pVCpu The cross context virtual CPU structure.
1331 * @param pVmxTransient The VMX-transient structure.
1332 */
1333DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1334{
1335 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1336 {
1337 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1338 AssertRC(rc);
1339 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1340 }
1341}
1342
1343#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1344/**
1345 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1346 *
1347 * @param pVCpu The cross context virtual CPU structure.
1348 * @param pVmxTransient The VMX-transient structure.
1349 */
1350static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1351{
1352 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1353 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1354 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1355 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1356 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1358 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1359 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1360 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1361 AssertRC(rc);
1362 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1363 | HMVMX_READ_EXIT_INSTR_LEN
1364 | HMVMX_READ_EXIT_INSTR_INFO
1365 | HMVMX_READ_IDT_VECTORING_INFO
1366 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1367 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1368 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1369 | HMVMX_READ_GUEST_LINEAR_ADDR
1370 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1371}
1372#endif
1373
1374/**
1375 * Verifies that our cached values of the VMCS fields are all consistent with
1376 * what's actually present in the VMCS.
1377 *
1378 * @returns VBox status code.
1379 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1380 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1381 * VMCS content. HMCPU error-field is
1382 * updated, see VMX_VCI_XXX.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param pVmcsInfo The VMCS info. object.
1385 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1386 */
1387static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1388{
1389 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1390
1391 uint32_t u32Val;
1392 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1393 AssertRC(rc);
1394 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1395 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1396 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1397 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1398
1399 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1400 AssertRC(rc);
1401 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1402 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1403 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1404 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1405
1406 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1407 AssertRC(rc);
1408 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1409 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1410 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1411 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1412
1413 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1414 AssertRC(rc);
1415 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1416 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1417 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1418 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1419
1420 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1421 {
1422 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1423 AssertRC(rc);
1424 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1425 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1426 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1427 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1428 }
1429
1430 uint64_t u64Val;
1431 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1432 {
1433 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1434 AssertRC(rc);
1435 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1436 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1437 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1438 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1439 }
1440
1441 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1442 AssertRC(rc);
1443 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1444 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1445 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1446 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1447
1448 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1449 AssertRC(rc);
1450 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1451 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1452 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1453 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1454
1455 NOREF(pcszVmcs);
1456 return VINF_SUCCESS;
1457}
1458
1459
1460/**
1461 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1462 * VMCS.
1463 *
1464 * This is typically required when the guest changes paging mode.
1465 *
1466 * @returns VBox status code.
1467 * @param pVCpu The cross context virtual CPU structure.
1468 * @param pVmxTransient The VMX-transient structure.
1469 *
1470 * @remarks Requires EFER.
1471 * @remarks No-long-jump zone!!!
1472 */
1473static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1474{
1475 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1476 {
1477 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1478 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1479
1480 /*
1481 * VM-entry controls.
1482 */
1483 {
1484 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1485 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1486
1487 /*
1488 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1489 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1490 *
1491 * For nested-guests, this is a mandatory VM-entry control. It's also
1492 * required because we do not want to leak host bits to the nested-guest.
1493 */
1494 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1495
1496 /*
1497 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1498 *
1499          * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1500          * required to get the nested-guest working with hardware-assisted VMX execution.
1501          * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1502          * can skip intercepting changes to the EFER MSR, which is why this needs to be done
1503          * here rather than while merging the guest VMCS controls.
1504 */
1505 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1506 {
1507 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1508 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1509 }
1510 else
1511 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1512
1513 /*
1514 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1515          * If the CPU supports the newer VMCS controls for managing guest/host EFER, use them.
1516 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1517 * regardless of whether the nested-guest VMCS specifies it because we are free to
1518 * load whatever MSRs we require and we do not need to modify the guest visible copy
1519 * of the VM-entry MSR load area.
1520 */
1521 if ( g_fHmVmxSupportsVmcsEfer
1522#ifndef IN_NEM_DARWIN
1523 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1524#endif
1525 )
1526 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1527 else
1528 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
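            /* Note (assumption): when the CPU lacks these VMCS EFER controls, the EFER MSR is
               expected to be swapped via the VM-entry/VM-exit auto-load/store MSR areas instead;
               that path is handled elsewhere, so nothing more to do here. */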
1529
1530 /*
1531 * The following should -not- be set (since we're not in SMM mode):
1532 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1533 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1534 */
1535
1536 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1537 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1538
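            /* Sanity check: fVal started from allowed0 (bits that must be 1) and only gained the
               features requested above, while fZap is allowed1 (bits that may be 1). Any bit set
               in fVal but clear in fZap is a feature this CPU cannot provide. */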
1539 if ((fVal & fZap) == fVal)
1540 { /* likely */ }
1541 else
1542 {
1543 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1544 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1545 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1546 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1547 }
1548
1549 /* Commit it to the VMCS. */
1550 if (pVmcsInfo->u32EntryCtls != fVal)
1551 {
1552 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1553 AssertRC(rc);
1554 pVmcsInfo->u32EntryCtls = fVal;
1555 }
1556 }
1557
1558 /*
1559 * VM-exit controls.
1560 */
1561 {
1562 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1563 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1564
1565 /*
1566 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1567 * supported the 1-setting of this bit.
1568 *
1569 * For nested-guests, we set the "save debug controls" as the converse
1570 * "load debug controls" is mandatory for nested-guests anyway.
1571 */
1572 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1573
1574 /*
1575 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1576 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1577 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1578 * vmxHCExportHostMsrs().
1579 *
1580 * For nested-guests, we always set this bit as we do not support 32-bit
1581 * hosts.
1582 */
1583 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1584
1585#ifndef IN_NEM_DARWIN
1586 /*
1587          * If the VMCS EFER MSR fields are supported by the hardware, we use them.
1588 *
1589 * For nested-guests, we should use the "save IA32_EFER" control if we also
1590 * used the "load IA32_EFER" control while exporting VM-entry controls.
1591 */
1592 if ( g_fHmVmxSupportsVmcsEfer
1593 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1594 {
1595 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1596 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1597 }
1598#endif
1599
1600 /*
1601 * Enable saving of the VMX-preemption timer value on VM-exit.
1602 * For nested-guests, currently not exposed/used.
1603 */
1604 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1605 * the timer value. */
1606 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1607 {
1608 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1609 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1610 }
1611
1612 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1613 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1614
1615 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1616 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1617 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1618
1619 if ((fVal & fZap) == fVal)
1620 { /* likely */ }
1621 else
1622 {
1623 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1624 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1625 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1626 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1627 }
1628
1629 /* Commit it to the VMCS. */
1630 if (pVmcsInfo->u32ExitCtls != fVal)
1631 {
1632 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1633 AssertRC(rc);
1634 pVmcsInfo->u32ExitCtls = fVal;
1635 }
1636 }
1637
1638 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1639 }
1640 return VINF_SUCCESS;
1641}
1642
1643
1644/**
1645 * Sets the TPR threshold in the VMCS.
1646 *
1647 * @param pVCpu The cross context virtual CPU structure.
1648 * @param pVmcsInfo The VMCS info. object.
1649 * @param u32TprThreshold The TPR threshold (task-priority class only).
1650 */
1651DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1652{
1653 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1654 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1655 RT_NOREF(pVmcsInfo);
1656 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1657 AssertRC(rc);
1658}
1659
1660
1661/**
1662 * Exports the guest APIC TPR state into the VMCS.
1663 *
1664 * @param pVCpu The cross context virtual CPU structure.
1665 * @param pVmxTransient The VMX-transient structure.
1666 *
1667 * @remarks No-long-jump zone!!!
1668 */
1669static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1670{
1671 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1672 {
1673 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1674
1675 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1676 if (!pVmxTransient->fIsNestedGuest)
1677 {
1678 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1679 && APICIsEnabled(pVCpu))
1680 {
1681 /*
1682 * Setup TPR shadowing.
1683 */
1684 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1685 {
1686 bool fPendingIntr = false;
1687 uint8_t u8Tpr = 0;
1688 uint8_t u8PendingIntr = 0;
1689 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1690 AssertRC(rc);
1691
1692 /*
1693 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1694 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1695 * priority of the pending interrupt so we can deliver the interrupt. If there
1696 * are no interrupts pending, set threshold to 0 to not cause any
1697 * TPR-below-threshold VM-exits.
1698 */
1699 uint32_t u32TprThreshold = 0;
1700 if (fPendingIntr)
1701 {
1702 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1703 (which is the Task-Priority Class). */
1704 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1705 const uint8_t u8TprPriority = u8Tpr >> 4;
1706 if (u8PendingPriority <= u8TprPriority)
1707 u32TprThreshold = u8PendingPriority;
1708 }
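                    /* Worked example (illustrative): guest TPR = 0x50 (class 5), pending vector 0x41
                       (class 4). Since 4 <= 5 the threshold becomes 4, so VT-x raises a
                       TPR-below-threshold VM-exit once the guest lowers its TPR below 0x40, at which
                       point the pending interrupt can be delivered. */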
1709
1710 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1711 }
1712 }
1713 }
1714 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1715 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1716 }
1717}
1718
1719
1720/**
1721 * Gets the guest interruptibility-state and updates related force-flags.
1722 *
1723 * @returns Guest's interruptibility-state.
1724 * @param pVCpu The cross context virtual CPU structure.
1725 *
1726 * @remarks No-long-jump zone!!!
1727 */
1728static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1729{
1730 /*
1731 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1732 */
1733 uint32_t fIntrState = 0;
1734 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1735 {
1736 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1737 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1738
1739 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1740 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1741 {
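            /* The force-flag does not record which instruction caused the inhibition, so this is a
               best-effort guess: report STI blocking when EFLAGS.IF is set and MOV SS/POP SS blocking
               otherwise (both inhibit interrupts for exactly one instruction). */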
1742 if (pCtx->eflags.Bits.u1IF)
1743 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1744 else
1745 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1746 }
1747 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1748 {
1749 /*
1750              * We can clear the inhibit force flag since, even if we go back to the recompiler
1751              * without executing guest code in VT-x, the condition for clearing the flag has
1752              * been met and thus the cleared state is correct.
1753 */
1754 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1755 }
1756 }
1757
1758 /*
1759 * Check if we should inhibit NMI delivery.
1760 */
1761 if (CPUMIsGuestNmiBlocking(pVCpu))
1762 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1763
1764 /*
1765 * Validate.
1766 */
1767#ifdef VBOX_STRICT
1768     /* We don't support block-by-SMI yet. */
1769 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1770
1771 /* Block-by-STI must not be set when interrupts are disabled. */
1772 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1773 {
1774 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1775 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1776 }
1777#endif
1778
1779 return fIntrState;
1780}
1781
1782
1783/**
1784 * Exports the exception intercepts required for guest execution in the VMCS.
1785 *
1786 * @param pVCpu The cross context virtual CPU structure.
1787 * @param pVmxTransient The VMX-transient structure.
1788 *
1789 * @remarks No-long-jump zone!!!
1790 */
1791static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1792{
1793 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1794 {
1795 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1796 if ( !pVmxTransient->fIsNestedGuest
1797 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1798 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1799 else
1800 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1801
1802 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1803 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1804 }
1805}
1806
1807
1808/**
1809 * Exports the guest's RIP into the guest-state area in the VMCS.
1810 *
1811 * @param pVCpu The cross context virtual CPU structure.
1812 *
1813 * @remarks No-long-jump zone!!!
1814 */
1815static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1816{
1817 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1818 {
1819 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1820
1821 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1822 AssertRC(rc);
1823
1824 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1825 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1826 }
1827}
1828
1829
1830/**
1831 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1832 *
1833 * @param pVCpu The cross context virtual CPU structure.
1834 * @param pVmxTransient The VMX-transient structure.
1835 *
1836 * @remarks No-long-jump zone!!!
1837 */
1838static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1839{
1840 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1841 {
1842 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1843
1844 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1845 Let us assert it as such and use 32-bit VMWRITE. */
1846 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1847 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1848 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1849 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1850
1851#ifndef IN_NEM_DARWIN
1852 /*
1853 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1854 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1855 * can run the real-mode guest code under Virtual 8086 mode.
1856 */
1857 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1858 if (pVmcsInfo->RealMode.fRealOnV86Active)
1859 {
1860 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1861 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1862 Assert(!pVmxTransient->fIsNestedGuest);
1863 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1864 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1865 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1866 }
1867#else
1868 RT_NOREF(pVmxTransient);
1869#endif
1870
1871 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1872 AssertRC(rc);
1873
1874 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1875 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1876 }
1877}
1878
1879
1880#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1881/**
1882 * Copies the nested-guest VMCS to the shadow VMCS.
1883 *
1884 * @returns VBox status code.
1885 * @param pVCpu The cross context virtual CPU structure.
1886 * @param pVmcsInfo The VMCS info. object.
1887 *
1888 * @remarks No-long-jump zone!!!
1889 */
1890static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1891{
1892 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1893 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1894
1895 /*
1896 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1897 * current VMCS, as we may try saving guest lazy MSRs.
1898 *
1899      * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1900      * calling the VMCS import code, which currently performs the guest MSR reads
1901      * (on 64-bit hosts) and accesses the auto-load/store MSR area (on 32-bit hosts),
1902      * or any of the rest of the VMX leave-session machinery.
1903 */
1904 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1905
1906 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1907 if (RT_SUCCESS(rc))
1908 {
1909 /*
1910 * Copy all guest read/write VMCS fields.
1911 *
1912 * We don't check for VMWRITE failures here for performance reasons and
1913 * because they are not expected to fail, barring irrecoverable conditions
1914 * like hardware errors.
1915 */
1916 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1917 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1918 {
1919 uint64_t u64Val;
1920 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1921 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1922 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1923 }
1924
1925 /*
1926 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1927 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1928 */
1929 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1930 {
1931 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1932 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1933 {
1934 uint64_t u64Val;
1935 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1936 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1937 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1938 }
1939 }
1940
1941 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1942 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1943 }
1944
1945 ASMSetFlags(fEFlags);
1946 return rc;
1947}
1948
1949
1950/**
1951 * Copies the shadow VMCS to the nested-guest VMCS.
1952 *
1953 * @returns VBox status code.
1954 * @param pVCpu The cross context virtual CPU structure.
1955 * @param pVmcsInfo The VMCS info. object.
1956 *
1957 * @remarks Called with interrupts disabled.
1958 */
1959static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1960{
1961 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1962 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1963 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1964
1965 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1966 if (RT_SUCCESS(rc))
1967 {
1968 /*
1969 * Copy guest read/write fields from the shadow VMCS.
1970 * Guest read-only fields cannot be modified, so no need to copy them.
1971 *
1972 * We don't check for VMREAD failures here for performance reasons and
1973 * because they are not expected to fail, barring irrecoverable conditions
1974 * like hardware errors.
1975 */
1976 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1977 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1978 {
1979 uint64_t u64Val;
1980 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1981 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1982 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1983 }
1984
1985 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1986 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1987 }
1988 return rc;
1989}
1990
1991
1992/**
1993 * Enables VMCS shadowing for the given VMCS info. object.
1994 *
1995 * @param pVCpu The cross context virtual CPU structure.
1996 * @param pVmcsInfo The VMCS info. object.
1997 *
1998 * @remarks No-long-jump zone!!!
1999 */
2000static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2001{
2002 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2003 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2004 {
2005 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2006 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2007 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2008 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2009 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2010 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2011 Log4Func(("Enabled\n"));
2012 }
2013}
2014
2015
2016/**
2017 * Disables VMCS shadowing for the given VMCS info. object.
2018 *
2019 * @param pVCpu The cross context virtual CPU structure.
2020 * @param pVmcsInfo The VMCS info. object.
2021 *
2022 * @remarks No-long-jump zone!!!
2023 */
2024static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2025{
2026 /*
2027 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2028 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2029 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2030 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2031 *
2032 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2033 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2034 */
2035 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2036 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2037 {
2038 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2039 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2040 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2041 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2042 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2043 Log4Func(("Disabled\n"));
2044 }
2045}
2046#endif
2047
2048
2049/**
2050 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2051 *
2052 * The guest FPU state is always pre-loaded hence we don't need to bother about
2053 * sharing FPU related CR0 bits between the guest and host.
2054 *
2055 * @returns VBox status code.
2056 * @param pVCpu The cross context virtual CPU structure.
2057 * @param pVmxTransient The VMX-transient structure.
2058 *
2059 * @remarks No-long-jump zone!!!
2060 */
2061static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2062{
2063 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2064 {
2065 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2066 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2067
2068 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2069 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2070 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2071 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2072 else
2073 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2074
2075 if (!pVmxTransient->fIsNestedGuest)
2076 {
2077 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2078 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2079 uint64_t const u64ShadowCr0 = u64GuestCr0;
2080 Assert(!RT_HI_U32(u64GuestCr0));
2081
2082 /*
2083 * Setup VT-x's view of the guest CR0.
2084 */
2085 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2086 if (VM_IS_VMX_NESTED_PAGING(pVM))
2087 {
2088#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2089 if (CPUMIsGuestPagingEnabled(pVCpu))
2090 {
2091 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2092 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2093 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2094 }
2095 else
2096 {
2097 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2098 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2099 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2100 }
2101
2102 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2103 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2104 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2105#endif
2106 }
2107 else
2108 {
2109 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2110 u64GuestCr0 |= X86_CR0_WP;
2111 }
2112
2113 /*
2114 * Guest FPU bits.
2115 *
2116 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2117 * using CR0.TS.
2118 *
2119          * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always
2120          * be set on the first CPUs to support VT-x, but makes no mention of it with regards to UX in the VM-entry checks.
2121 */
2122 u64GuestCr0 |= X86_CR0_NE;
2123
2124 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
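            /* (With CR0.NE clear the guest expects x87 errors to be reported via the legacy external
               interrupt mechanism (FERR#/IRQ 13) rather than #MF, so #MF has to be intercepted and
               handled accordingly.) */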
2125 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2126
2127 /*
2128 * Update exception intercepts.
2129 */
2130 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2131#ifndef IN_NEM_DARWIN
2132 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2133 {
2134 Assert(PDMVmmDevHeapIsEnabled(pVM));
2135 Assert(pVM->hm.s.vmx.pRealModeTSS);
2136 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2137 }
2138 else
2139#endif
2140 {
2141 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2142 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2143 if (fInterceptMF)
2144 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2145 }
2146
2147             /* Additional intercepts for debugging; define these yourself explicitly. */
2148#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2149 uXcptBitmap |= 0
2150 | RT_BIT(X86_XCPT_BP)
2151 | RT_BIT(X86_XCPT_DE)
2152 | RT_BIT(X86_XCPT_NM)
2153 | RT_BIT(X86_XCPT_TS)
2154 | RT_BIT(X86_XCPT_UD)
2155 | RT_BIT(X86_XCPT_NP)
2156 | RT_BIT(X86_XCPT_SS)
2157 | RT_BIT(X86_XCPT_GP)
2158 | RT_BIT(X86_XCPT_PF)
2159 | RT_BIT(X86_XCPT_MF)
2160 ;
2161#elif defined(HMVMX_ALWAYS_TRAP_PF)
2162 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2163#endif
2164 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2165 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2166 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2167
2168 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2169 u64GuestCr0 |= fSetCr0;
2170 u64GuestCr0 &= fZapCr0;
2171 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
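            /* Note that u64ShadowCr0 (committed below as the CR0 read shadow) still holds the guest's
               original CR0, so guest reads of bits covered by the CR0 guest/host mask keep seeing the
               values the guest wrote rather than the adjustments made above. */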
2172
2173 /* Commit the CR0 and related fields to the guest VMCS. */
2174 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2175 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2176 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2177 {
2178 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2179 AssertRC(rc);
2180 }
2181 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2182 {
2183 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2184 AssertRC(rc);
2185 }
2186
2187 /* Update our caches. */
2188 pVmcsInfo->u32ProcCtls = uProcCtls;
2189 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2190
2191 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2192 }
2193 else
2194 {
2195 /*
2196 * With nested-guests, we may have extended the guest/host mask here since we
2197 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2198 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2199 * originally supplied. We must copy those bits from the nested-guest CR0 into
2200 * the nested-guest CR0 read-shadow.
2201 */
2202 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2203 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2204 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2205 Assert(!RT_HI_U32(u64GuestCr0));
2206 Assert(u64GuestCr0 & X86_CR0_NE);
2207
2208 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2209 u64GuestCr0 |= fSetCr0;
2210 u64GuestCr0 &= fZapCr0;
2211 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2212
2213 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2214 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2215 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2216
2217 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2218 }
2219
2220 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2221 }
2222
2223 return VINF_SUCCESS;
2224}
2225
2226
2227/**
2228 * Exports the guest control registers (CR3, CR4) into the guest-state area
2229 * in the VMCS.
2230 *
2231 * @returns VBox strict status code.
2232 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2233 * without unrestricted guest access and the VMMDev is not presently
2234 * mapped (e.g. EFI32).
2235 *
2236 * @param pVCpu The cross context virtual CPU structure.
2237 * @param pVmxTransient The VMX-transient structure.
2238 *
2239 * @remarks No-long-jump zone!!!
2240 */
2241static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2242{
2243 int rc = VINF_SUCCESS;
2244 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2245
2246 /*
2247 * Guest CR2.
2248 * It's always loaded in the assembler code. Nothing to do here.
2249 */
2250
2251 /*
2252 * Guest CR3.
2253 */
2254 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2255 {
2256 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2257
2258 if (VM_IS_VMX_NESTED_PAGING(pVM))
2259 {
2260#ifndef IN_NEM_DARWIN
2261 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2262 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2263
2264 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2265 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2266 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2267 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2268
2269 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2270 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2271 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
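            /* Resulting EPTP layout (illustrative): bits 2:0 = 6 (write-back memory type), bits 5:3 = 3
               (page-walk length minus one, i.e. 4-level), bit 6 = accessed/dirty enable (not set here),
               upper bits = physical address of the EPT PML4 table. */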
2272
2273 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2274 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2275 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
2276 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2277 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2278 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2279 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2280
2281 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2282 AssertRC(rc);
2283#endif
2284
2285 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2286 uint64_t u64GuestCr3 = pCtx->cr3;
2287 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2288 || CPUMIsGuestPagingEnabledEx(pCtx))
2289 {
2290 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2291 if (CPUMIsGuestInPAEModeEx(pCtx))
2292 {
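                    /* With EPT enabled and the guest in PAE mode, the CPU loads the four PDPTEs from
                       these VMCS fields on VM-entry instead of fetching them from guest CR3, so they
                       must be kept in sync with the guest's PDPTEs. */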
2293 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2294 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2295 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2296 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2297 }
2298
2299 /*
2300 * The guest's view of its CR3 is unblemished with nested paging when the
2301 * guest is using paging or we have unrestricted guest execution to handle
2302 * the guest when it's not using paging.
2303 */
2304 }
2305#ifndef IN_NEM_DARWIN
2306 else
2307 {
2308 /*
2309 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2310 * thinks it accesses physical memory directly, we use our identity-mapped
2311 * page table to map guest-linear to guest-physical addresses. EPT takes care
2312 * of translating it to host-physical addresses.
2313 */
2314 RTGCPHYS GCPhys;
2315 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2316
2317 /* We obtain it here every time as the guest could have relocated this PCI region. */
2318 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2319 if (RT_SUCCESS(rc))
2320 { /* likely */ }
2321 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2322 {
2323 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2324 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2325 }
2326 else
2327 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2328
2329 u64GuestCr3 = GCPhys;
2330 }
2331#endif
2332
2333 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2334 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2335 AssertRC(rc);
2336 }
2337 else
2338 {
2339 Assert(!pVmxTransient->fIsNestedGuest);
2340 /* Non-nested paging case, just use the hypervisor's CR3. */
2341 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2342
2343 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2344 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2345 AssertRC(rc);
2346 }
2347
2348 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2349 }
2350
2351 /*
2352 * Guest CR4.
2353      * ASSUMES this is done every time we get in from ring-3! (XCR0)
2354 */
2355 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2356 {
2357 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2358 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2359
2360 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2361 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2362
2363 /*
2364 * With nested-guests, we may have extended the guest/host mask here (since we
2365 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2366 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2367 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2368 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2369 */
2370 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2371 uint64_t u64GuestCr4 = pCtx->cr4;
2372 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2373 ? pCtx->cr4
2374 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2375 Assert(!RT_HI_U32(u64GuestCr4));
2376
2377#ifndef IN_NEM_DARWIN
2378 /*
2379 * Setup VT-x's view of the guest CR4.
2380 *
2381 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2382 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2383 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2384 *
2385 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2386 */
2387 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2388 {
2389 Assert(pVM->hm.s.vmx.pRealModeTSS);
2390 Assert(PDMVmmDevHeapIsEnabled(pVM));
2391 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2392 }
2393#endif
2394
2395 if (VM_IS_VMX_NESTED_PAGING(pVM))
2396 {
2397 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2398 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2399 {
2400 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2401 u64GuestCr4 |= X86_CR4_PSE;
2402 /* Our identity mapping is a 32-bit page directory. */
2403 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2404 }
2405             /* else use guest CR4. */
2406 }
2407 else
2408 {
2409 Assert(!pVmxTransient->fIsNestedGuest);
2410
2411 /*
2412 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2413 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2414 */
2415 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2416 {
2417 case PGMMODE_REAL: /* Real-mode. */
2418 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2419 case PGMMODE_32_BIT: /* 32-bit paging. */
2420 {
2421 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2422 break;
2423 }
2424
2425 case PGMMODE_PAE: /* PAE paging. */
2426 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2427 {
2428 u64GuestCr4 |= X86_CR4_PAE;
2429 break;
2430 }
2431
2432 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2433 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2434 {
2435#ifdef VBOX_WITH_64_BITS_GUESTS
2436 /* For our assumption in vmxHCShouldSwapEferMsr. */
2437 Assert(u64GuestCr4 & X86_CR4_PAE);
2438 break;
2439#endif
2440 }
2441 default:
2442 AssertFailed();
2443 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2444 }
2445 }
2446
2447 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2448 u64GuestCr4 |= fSetCr4;
2449 u64GuestCr4 &= fZapCr4;
2450
2451 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2452 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2453 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2454
2455#ifndef IN_NEM_DARWIN
2456 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2457 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2458 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2459 {
2460 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2461 hmR0VmxUpdateStartVmFunction(pVCpu);
2462 }
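        /* fLoadSaveGuestXcr0 feeds hmR0VmxUpdateStartVmFunction(), which (assumption) picks a VM-entry
           worker that swaps XCR0 only when the guest actually has CR4.OSXSAVE set and its XCR0 differs
           from the host value, per the condition computed above. */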
2463#endif
2464
2465 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2466
2467 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2468 }
2469 return rc;
2470}
2471
2472
2473#ifdef VBOX_STRICT
2474/**
2475 * Strict function to validate segment registers.
2476 *
2477 * @param pVCpu The cross context virtual CPU structure.
2478 * @param pVmcsInfo The VMCS info. object.
2479 *
2480 * @remarks Will import guest CR0 on strict builds during validation of
2481 * segments.
2482 */
2483static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2484{
2485 /*
2486 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2487 *
2488 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2489      * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2490 * unusable bit and doesn't change the guest-context value.
2491 */
2492 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2493 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2494 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2495 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2496 && ( !CPUMIsGuestInRealModeEx(pCtx)
2497 && !CPUMIsGuestInV86ModeEx(pCtx)))
2498 {
2499 /* Protected mode checks */
2500 /* CS */
2501 Assert(pCtx->cs.Attr.n.u1Present);
2502 Assert(!(pCtx->cs.Attr.u & 0xf00));
2503 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2504 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2505 || !(pCtx->cs.Attr.n.u1Granularity));
2506 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2507 || (pCtx->cs.Attr.n.u1Granularity));
2508 /* CS cannot be loaded with NULL in protected mode. */
2509 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2510 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2511 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2512 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2513 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2514 else
2515             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2516 /* SS */
2517 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2518 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2519 if ( !(pCtx->cr0 & X86_CR0_PE)
2520 || pCtx->cs.Attr.n.u4Type == 3)
2521 {
2522 Assert(!pCtx->ss.Attr.n.u2Dpl);
2523 }
2524 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2525 {
2526 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2527 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2528 Assert(pCtx->ss.Attr.n.u1Present);
2529 Assert(!(pCtx->ss.Attr.u & 0xf00));
2530 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2531 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2532 || !(pCtx->ss.Attr.n.u1Granularity));
2533 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2534 || (pCtx->ss.Attr.n.u1Granularity));
2535 }
2536 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2537 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2538 {
2539 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2540 Assert(pCtx->ds.Attr.n.u1Present);
2541 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2542 Assert(!(pCtx->ds.Attr.u & 0xf00));
2543 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2544 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2545 || !(pCtx->ds.Attr.n.u1Granularity));
2546 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2547 || (pCtx->ds.Attr.n.u1Granularity));
2548 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2549 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2550 }
2551 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2552 {
2553 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2554 Assert(pCtx->es.Attr.n.u1Present);
2555 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2556 Assert(!(pCtx->es.Attr.u & 0xf00));
2557 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2558 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2559 || !(pCtx->es.Attr.n.u1Granularity));
2560 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2561 || (pCtx->es.Attr.n.u1Granularity));
2562 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2563 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2564 }
2565 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2566 {
2567 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2568 Assert(pCtx->fs.Attr.n.u1Present);
2569 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2570 Assert(!(pCtx->fs.Attr.u & 0xf00));
2571 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2572 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2573 || !(pCtx->fs.Attr.n.u1Granularity));
2574 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2575 || (pCtx->fs.Attr.n.u1Granularity));
2576 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2577 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2578 }
2579 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2580 {
2581 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2582 Assert(pCtx->gs.Attr.n.u1Present);
2583 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2584 Assert(!(pCtx->gs.Attr.u & 0xf00));
2585 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2586 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2587 || !(pCtx->gs.Attr.n.u1Granularity));
2588 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2589 || (pCtx->gs.Attr.n.u1Granularity));
2590 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2591 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2592 }
2593 /* 64-bit capable CPUs. */
2594 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2595 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2596 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2597 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2598 }
2599 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2600 || ( CPUMIsGuestInRealModeEx(pCtx)
2601 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2602 {
2603 /* Real and v86 mode checks. */
2604         /* vmxHCExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
2605 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2606#ifndef IN_NEM_DARWIN
2607 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2608 {
2609 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2610 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2611 }
2612 else
2613#endif
2614 {
2615 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2616 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2617 }
2618
2619 /* CS */
2620         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2621 Assert(pCtx->cs.u32Limit == 0xffff);
2622 Assert(u32CSAttr == 0xf3);
2623 /* SS */
2624 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2625 Assert(pCtx->ss.u32Limit == 0xffff);
2626 Assert(u32SSAttr == 0xf3);
2627 /* DS */
2628 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2629 Assert(pCtx->ds.u32Limit == 0xffff);
2630 Assert(u32DSAttr == 0xf3);
2631 /* ES */
2632 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2633 Assert(pCtx->es.u32Limit == 0xffff);
2634 Assert(u32ESAttr == 0xf3);
2635 /* FS */
2636 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2637 Assert(pCtx->fs.u32Limit == 0xffff);
2638 Assert(u32FSAttr == 0xf3);
2639 /* GS */
2640 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2641 Assert(pCtx->gs.u32Limit == 0xffff);
2642 Assert(u32GSAttr == 0xf3);
2643 /* 64-bit capable CPUs. */
2644 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2645 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2646 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2647 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2648 }
2649}
2650#endif /* VBOX_STRICT */
2651
2652
2653/**
2654 * Exports a guest segment register into the guest-state area in the VMCS.
2655 *
2656 * @returns VBox status code.
2657 * @param pVCpu The cross context virtual CPU structure.
2658 * @param pVmcsInfo The VMCS info. object.
2659 * @param iSegReg The segment register number (X86_SREG_XXX).
2660 * @param pSelReg Pointer to the segment selector.
2661 *
2662 * @remarks No-long-jump zone!!!
2663 */
2664static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2665{
2666 Assert(iSegReg < X86_SREG_COUNT);
2667
2668 uint32_t u32Access = pSelReg->Attr.u;
2669#ifndef IN_NEM_DARWIN
2670 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2671#endif
2672 {
2673 /*
2674 * The way to differentiate between whether this is really a null selector or was just
2675 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
2676 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
2677      * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2678      * that NULL selectors loaded in protected-mode have their attributes set to 0.
2679 */
2680 if (u32Access)
2681 { }
2682 else
2683 u32Access = X86DESCATTR_UNUSABLE;
2684 }
2685#ifndef IN_NEM_DARWIN
2686 else
2687 {
2688 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2689 u32Access = 0xf3;
2690 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2691 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2692 RT_NOREF_PV(pVCpu);
2693 }
2694#else
2695 RT_NOREF(pVmcsInfo);
2696#endif
2697
2698 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2699 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2700               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2701
2702 /*
2703 * Commit it to the VMCS.
2704 */
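    /* The asserts below are a sanity check that the field encodings computed from iSegReg match the
       g_aVmcsSegSel/Limit/Attr/Base lookup tables used elsewhere for the same fields. */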
2705 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2706 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2707 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2708 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2709 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2710 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2711 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2712 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2713 return VINF_SUCCESS;
2714}
2715
2716
2717/**
2718 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2719 * area in the VMCS.
2720 *
2721 * @returns VBox status code.
2722 * @param pVCpu The cross context virtual CPU structure.
2723 * @param pVmxTransient The VMX-transient structure.
2724 *
2725 * @remarks Will import guest CR0 on strict builds during validation of
2726 * segments.
2727 * @remarks No-long-jump zone!!!
2728 */
2729static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2730{
2731 int rc = VERR_INTERNAL_ERROR_5;
2732#ifndef IN_NEM_DARWIN
2733 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2734#endif
2735 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2736 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2737#ifndef IN_NEM_DARWIN
2738 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2739#endif
2740
2741 /*
2742 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2743 */
2744 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2745 {
2746 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2747 {
2748 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2749#ifndef IN_NEM_DARWIN
2750 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2751 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2752#endif
2753 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2754 AssertRC(rc);
2755 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2756 }
2757
2758 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2759 {
2760 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2761#ifndef IN_NEM_DARWIN
2762 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2763 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2764#endif
2765 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2766 AssertRC(rc);
2767 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2768 }
2769
2770 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2771 {
2772 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2773#ifndef IN_NEM_DARWIN
2774 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2775 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2776#endif
2777 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2778 AssertRC(rc);
2779 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2780 }
2781
2782 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2783 {
2784 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2785#ifndef IN_NEM_DARWIN
2786 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2787 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2788#endif
2789 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2790 AssertRC(rc);
2791 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2792 }
2793
2794 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2795 {
2796 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2797#ifndef IN_NEM_DARWIN
2798 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2799 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2800#endif
2801 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2802 AssertRC(rc);
2803 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2804 }
2805
2806 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2807 {
2808 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2809#ifndef IN_NEM_DARWIN
2810 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2811 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2812#endif
2813 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2814 AssertRC(rc);
2815 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2816 }
2817
2818#ifdef VBOX_STRICT
2819 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2820#endif
2821 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2822 pCtx->cs.Attr.u));
2823 }
2824
2825 /*
2826 * Guest TR.
2827 */
2828 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2829 {
2830 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2831
2832 /*
2833 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2834 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2835 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2836 */
2837 uint16_t u16Sel;
2838 uint32_t u32Limit;
2839 uint64_t u64Base;
2840 uint32_t u32AccessRights;
2841#ifndef IN_NEM_DARWIN
2842 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2843#endif
2844 {
2845 u16Sel = pCtx->tr.Sel;
2846 u32Limit = pCtx->tr.u32Limit;
2847 u64Base = pCtx->tr.u64Base;
2848 u32AccessRights = pCtx->tr.Attr.u;
2849 }
2850#ifndef IN_NEM_DARWIN
2851 else
2852 {
2853 Assert(!pVmxTransient->fIsNestedGuest);
2854 Assert(pVM->hm.s.vmx.pRealModeTSS);
2855 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2856
2857 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2858 RTGCPHYS GCPhys;
2859 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2860 AssertRCReturn(rc, rc);
2861
2862 X86DESCATTR DescAttr;
2863 DescAttr.u = 0;
2864 DescAttr.n.u1Present = 1;
2865 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2866
2867 u16Sel = 0;
2868 u32Limit = HM_VTX_TSS_SIZE;
2869 u64Base = GCPhys;
2870 u32AccessRights = DescAttr.u;
2871 }
2872#endif
2873
2874 /* Validate. */
2875 Assert(!(u16Sel & RT_BIT(2)));
2876 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2877 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2878 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2879 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2880 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2881 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2882 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2883 Assert( (u32Limit & 0xfff) == 0xfff
2884 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2885 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2886 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2887
2888 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2889 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2890 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2891 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2892
2893 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2894 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2895 }
2896
2897 /*
2898 * Guest GDTR.
2899 */
2900 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2901 {
2902 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2903
2904 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2905 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2906
2907 /* Validate. */
2908 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2909
2910 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2911 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2912 }
2913
2914 /*
2915 * Guest LDTR.
2916 */
2917 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2918 {
2919 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2920
2921 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2922 uint32_t u32Access;
2923 if ( !pVmxTransient->fIsNestedGuest
2924 && !pCtx->ldtr.Attr.u)
2925 u32Access = X86DESCATTR_UNUSABLE;
2926 else
2927 u32Access = pCtx->ldtr.Attr.u;
2928
2929 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2930 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2931 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2932 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2933
2934 /* Validate. */
2935 if (!(u32Access & X86DESCATTR_UNUSABLE))
2936 {
2937 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2938 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2939 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2940 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2941 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2942 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2943 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2944 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2945 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2946 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2947 }
2948
2949 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2950 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2951 }
2952
2953 /*
2954 * Guest IDTR.
2955 */
2956 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2957 {
2958 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2959
2960 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2961 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2962
2963 /* Validate. */
2964 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2965
2966 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2967 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2968 }
2969
2970 return VINF_SUCCESS;
2971}
2972
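/* Note: the exporter above only writes TR, GDTR, LDTR and IDTR into the VMCS when the
   corresponding HM_CHANGED_GUEST_XXX bit is set in fCtxChanged and clears that bit once
   the write has been performed; the Assert/AssertMsg checks encode the VM-entry
   guest-state requirements for these registers (busy TSS type, reserved bits zero,
   limit/granularity consistency) so violations are caught in strict builds. */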
2973
2974/**
2975 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2976 * VM-exit interruption info type.
2977 *
2978 * @returns The IEM exception flags.
2979 * @param uVector The event vector.
2980 * @param uVmxEventType The VMX event type.
2981 *
2982 * @remarks This function currently only constructs flags required for
2983 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
2984 * and CR2 aspects of an exception are not included).
2985 */
2986static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2987{
2988 uint32_t fIemXcptFlags;
2989 switch (uVmxEventType)
2990 {
2991 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2992 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2993 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2994 break;
2995
2996 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
2997 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
2998 break;
2999
3000 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3001 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3002 break;
3003
3004 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3005 {
3006 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3007 if (uVector == X86_XCPT_BP)
3008 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3009 else if (uVector == X86_XCPT_OF)
3010 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3011 else
3012 {
3013 fIemXcptFlags = 0;
3014 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3015 }
3016 break;
3017 }
3018
3019 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3020 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3021 break;
3022
3023 default:
3024 fIemXcptFlags = 0;
3025 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3026 break;
3027 }
3028 return fIemXcptFlags;
3029}
3030
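/* Note: only the IEM_XCPT_FLAGS_T_xxx / instruction-origin bits are produced above; the
   caller is expected to combine them with the flags of the original (IDT-vectoring) event
   and hand both to IEM's recursive-exception evaluation (IEMEvaluateRecursiveXcpt), which
   broadly decides whether the new event is delivered as-is, turned into a double fault or
   escalates further. Error-code and CR2 handling happen elsewhere. */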
3031
3032/**
3033 * Sets an event as a pending event to be injected into the guest.
3034 *
3035 * @param pVCpu The cross context virtual CPU structure.
3036 * @param u32IntInfo The VM-entry interruption-information field.
3037 * @param cbInstr The VM-entry instruction length in bytes (for
3038 * software interrupts, exceptions and privileged
3039 * software exceptions).
3040 * @param u32ErrCode The VM-entry exception error code.
3041 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3042 * page-fault.
3043 */
3044DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3045 RTGCUINTPTR GCPtrFaultAddress)
3046{
3047 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3048 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3049 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3050 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3051 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3052 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3053}
3054
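/* Illustrative sketch (hypothetical caller, not part of the original source): an exit
   handler that wants the guest to see an exception builds the interruption-information
   word and queues it, e.g.
       vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0, uErrCode, GCPtrFaultAddress);
   The queued event is later written into the VM-entry fields by vmxHCInjectEventVmcs()
   below rather than being delivered immediately. */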
3055
3056/**
3057 * Sets an external interrupt as pending-for-injection into the VM.
3058 *
3059 * @param pVCpu The cross context virtual CPU structure.
3060 * @param u8Interrupt The external interrupt vector.
3061 */
3062DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3063{
3064 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3065 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3066 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3067 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3068 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3069}
3070
3071
3072/**
3073 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3074 *
3075 * @param pVCpu The cross context virtual CPU structure.
3076 */
3077DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3078{
3079 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3080 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3081 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3082 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3083 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3084}
3085
3086
3087/**
3088 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3089 *
3090 * @param pVCpu The cross context virtual CPU structure.
3091 */
3092DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3093{
3094 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3095 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3096 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3097 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3098 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3099}
3100
3101
3102/**
3103 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3104 *
3105 * @param pVCpu The cross context virtual CPU structure.
3106 */
3107DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3108{
3109 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3110 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3111 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3112 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3113 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3114}
3115
3116
3117/**
3118 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3119 *
3120 * @param pVCpu The cross context virtual CPU structure.
3121 */
3122DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3123{
3124 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3125 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3126 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3127 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3128 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3129}
3130
3131
3132#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3133/**
3134 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3135 *
3136 * @param pVCpu The cross context virtual CPU structure.
3137 * @param u32ErrCode The error code for the general-protection exception.
3138 */
3139DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3140{
3141 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3142 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3143 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3144 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3145 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3146}
3147
3148
3149/**
3150 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3151 *
3152 * @param pVCpu The cross context virtual CPU structure.
3153 * @param u32ErrCode The error code for the stack exception.
3154 */
3155DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3156{
3157 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3158 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3159 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3160 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3161 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3162}
3163#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3164
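/* Illustrative sketch (hypothetical call sites, not part of the original source): the
   convenience helpers above let an exit handler reflect a fault with a one-liner, e.g.
       vmxHCSetPendingXcptUD(pVCpu);
       return VINF_SUCCESS;
   or, where an error code is required on the nested-guest paths:
       vmxHCSetPendingXcptGP(pVCpu, 0);
   In all cases the exception is injected on the next VM-entry. */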
3165
3166/**
3167 * Fixes up attributes for the specified segment register.
3168 *
3169 * @param pVCpu The cross context virtual CPU structure.
3170 * @param pSelReg The segment register that needs fixing.
3171 * @param pszRegName The register name (for logging and assertions).
3172 */
3173static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3174{
3175 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3176
3177 /*
3178 * If VT-x marks the segment as unusable, most other bits remain undefined:
3179 * - For CS the L, D and G bits have meaning.
3180 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3181 * - For the remaining data segments no bits are defined.
3182 *
3183 * The present bit and the unusable bit have been observed to be set at the
3184 * same time (the selector was supposed to be invalid as we started executing
3185 * a V8086 interrupt in ring-0).
3186 *
3187 * What should be important for the rest of the VBox code, is that the P bit is
3188 * cleared. Some of the other VBox code recognizes the unusable bit, but
3189 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3190 * safe side here, we'll strip off P and other bits we don't care about. If
3191 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3192 *
3193 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3194 */
3195#ifdef VBOX_STRICT
3196 uint32_t const uAttr = pSelReg->Attr.u;
3197#endif
3198
3199 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3200 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3201 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3202
3203#ifdef VBOX_STRICT
3204# ifndef IN_NEM_DARWIN
3205 VMMRZCallRing3Disable(pVCpu);
3206# endif
3207 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3208# ifdef DEBUG_bird
3209 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3210 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3211 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3212# endif
3213# ifndef IN_NEM_DARWIN
3214 VMMRZCallRing3Enable(pVCpu);
3215# endif
3216 NOREF(uAttr);
3217#endif
3218 RT_NOREF2(pVCpu, pszRegName);
3219}
3220
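/* Worked example (hypothetical values, for illustration): an unusable SS read back from
   the VMCS as 0x1c093 (unusable, G, D, P, S, type=3) is reduced by the masking above to
   0x1c013, i.e. the present bit (and any limit-high/AVL bits) is stripped while the
   unusable, G, D, DPL, S and type bits are preserved. */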
3221
3222/**
3223 * Imports a guest segment register from the current VMCS into the guest-CPU
3224 * context.
3225 *
3226 * @param pVCpu The cross context virtual CPU structure.
3227 * @param iSegReg The segment register number (X86_SREG_XXX).
3228 *
3229 * @remarks Called with interrupts and/or preemption disabled.
3230 */
3231static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3232{
3233 Assert(iSegReg < X86_SREG_COUNT);
3234 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3235 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3236 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3237 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3238
3239 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3240
3241 uint16_t u16Sel;
3242 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3243 pSelReg->Sel = u16Sel;
3244 pSelReg->ValidSel = u16Sel;
3245
3246 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3247 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3248
3249 uint32_t u32Attr;
3250 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3251 pSelReg->Attr.u = u32Attr;
3252 if (u32Attr & X86DESCATTR_UNUSABLE)
3253 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3254
3255 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3256}
3257
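/* Note on the string arithmetic above: "ES\0CS\0SS\0DS\0FS\0GS" is one packed literal of
   3-byte entries ("ES\0", "CS\0", ...), so adding iSegReg * 3 yields the NUL-terminated
   name of the segment register being fixed up, purely for logging and assertions. */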
3258
3259/**
3260 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3261 *
3262 * @param pVCpu The cross context virtual CPU structure.
3263 *
3264 * @remarks Called with interrupts and/or preemption disabled.
3265 */
3266static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3267{
3268 uint16_t u16Sel;
3269 uint64_t u64Base;
3270 uint32_t u32Limit, u32Attr;
3271 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3272 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3273 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3274 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3275
3276 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3277 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3278 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3279 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3280 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3281 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3282 if (u32Attr & X86DESCATTR_UNUSABLE)
3283 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3284}
3285
3286
3287/**
3288 * Imports the guest TR from the current VMCS into the guest-CPU context.
3289 *
3290 * @param pVCpu The cross context virtual CPU structure.
3291 *
3292 * @remarks Called with interrupts and/or preemption disabled.
3293 */
3294static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3295{
3296 uint16_t u16Sel;
3297 uint64_t u64Base;
3298 uint32_t u32Limit, u32Attr;
3299 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3300 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3301 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3302 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3303
3304 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3305 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3306 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3307 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3308 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3309 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3310 /* TR is the only selector that can never be unusable. */
3311 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3312}
3313
3314
3315/**
3316 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3317 *
3318 * @param pVCpu The cross context virtual CPU structure.
3319 *
3320 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3321 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3322 * instead!!!
3323 */
3324static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3325{
3326 uint64_t u64Val;
3327 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3328 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3329 {
3330 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3331 AssertRC(rc);
3332
3333 pCtx->rip = u64Val;
3334 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3335 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3336 }
3337}
3338
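/* Convention used by the importers above and below: a bit set in pCtx->fExtrn means the
   corresponding guest register still lives only in the VMCS (it is "external" to the
   CPUMCTX copy); each importer reads the value back and clears the bit so the rest of
   the VMM can rely on the CPUMCTX copy. */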
3339
3340/**
3341 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3342 *
3343 * @param pVCpu The cross context virtual CPU structure.
3344 * @param pVmcsInfo The VMCS info. object.
3345 *
3346 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3347 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3348 * instead!!!
3349 */
3350static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3351{
3352 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3353 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3354 {
3355 uint64_t u64Val;
3356 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3357 AssertRC(rc);
3358
3359 pCtx->rflags.u64 = u64Val;
3360#ifndef IN_NEM_DARWIN
3361 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3362 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3363 {
3364 pCtx->eflags.Bits.u1VM = 0;
3365 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3366 }
3367#else
3368 RT_NOREF(pVmcsInfo);
3369#endif
3370 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3371 }
3372}
3373
3374
3375/**
3376 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3377 * context.
3378 *
3379 * @param pVCpu The cross context virtual CPU structure.
3380 * @param pVmcsInfo The VMCS info. object.
3381 *
3382 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3383 * do not log!
3384 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3385 * instead!!!
3386 */
3387static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3388{
3389 uint32_t u32Val;
3390 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3391 if (!u32Val)
3392 {
3393 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3395 CPUMSetGuestNmiBlocking(pVCpu, false);
3396 }
3397 else
3398 {
3399 /*
3400 * We must import RIP here to set our EM interrupt-inhibited state.
3401 * We also import RFLAGS as our code that evaluates pending interrupts
3402 * before VM-entry requires it.
3403 */
3404 vmxHCImportGuestRip(pVCpu);
3405 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3406
3407 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3408 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3409 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3410 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3411
3412 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3413 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3414 }
3415}
3416
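/* Summary of the mapping above: BLOCK_STI/BLOCK_MOVSS in the interruptibility-state
   becomes the EM "inhibit interrupts until this RIP" marker, BLOCK_NMI is mirrored into
   CPUM's virtual-NMI blocking flag, and a zero state clears both. */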
3417
3418/**
3419 * Worker for VMXR0ImportStateOnDemand.
3420 *
3421 * @returns VBox status code.
3422 * @param pVCpu The cross context virtual CPU structure.
3423 * @param pVmcsInfo The VMCS info. object.
3424 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3425 */
3426static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3427{
3428 int rc = VINF_SUCCESS;
3429 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3430 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3431 uint32_t u32Val;
3432
3433 /*
3434 * Note! This is hack to workaround a mysterious BSOD observed with release builds
3435 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3436 * neither are other host platforms.
3437 *
3438 * Committing this temporarily as it prevents BSOD.
3439 *
3440 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3441 */
3442# ifdef RT_OS_WINDOWS
3443 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3444 return VERR_HM_IPE_1;
3445# endif
3446
3447 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3448
3449#ifndef IN_NEM_DARWIN
3450 /*
3451 * We disable interrupts to make the updating of the state and in particular
3452 * the fExtrn modification atomic wrt to preemption hooks.
3453 */
3454 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3455#endif
3456
3457 fWhat &= pCtx->fExtrn;
3458 if (fWhat)
3459 {
3460 do
3461 {
3462 if (fWhat & CPUMCTX_EXTRN_RIP)
3463 vmxHCImportGuestRip(pVCpu);
3464
3465 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3466 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3467
3468 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3469 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3470
3471 if (fWhat & CPUMCTX_EXTRN_RSP)
3472 {
3473 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3474 AssertRC(rc);
3475 }
3476
3477 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3478 {
3479 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3480#ifndef IN_NEM_DARWIN
3481 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3482#else
3483 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3484#endif
3485 if (fWhat & CPUMCTX_EXTRN_CS)
3486 {
3487 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3488 vmxHCImportGuestRip(pVCpu);
3489 if (fRealOnV86Active)
3490 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3491 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3492 }
3493 if (fWhat & CPUMCTX_EXTRN_SS)
3494 {
3495 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3496 if (fRealOnV86Active)
3497 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3498 }
3499 if (fWhat & CPUMCTX_EXTRN_DS)
3500 {
3501 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3502 if (fRealOnV86Active)
3503 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3504 }
3505 if (fWhat & CPUMCTX_EXTRN_ES)
3506 {
3507 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3508 if (fRealOnV86Active)
3509 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3510 }
3511 if (fWhat & CPUMCTX_EXTRN_FS)
3512 {
3513 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3514 if (fRealOnV86Active)
3515 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3516 }
3517 if (fWhat & CPUMCTX_EXTRN_GS)
3518 {
3519 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3520 if (fRealOnV86Active)
3521 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3522 }
3523 }
3524
3525 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3526 {
3527 if (fWhat & CPUMCTX_EXTRN_LDTR)
3528 vmxHCImportGuestLdtr(pVCpu);
3529
3530 if (fWhat & CPUMCTX_EXTRN_GDTR)
3531 {
3532 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3533 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3534 pCtx->gdtr.cbGdt = u32Val;
3535 }
3536
3537 /* Guest IDTR. */
3538 if (fWhat & CPUMCTX_EXTRN_IDTR)
3539 {
3540 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3541 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3542 pCtx->idtr.cbIdt = u32Val;
3543 }
3544
3545 /* Guest TR. */
3546 if (fWhat & CPUMCTX_EXTRN_TR)
3547 {
3548#ifndef IN_NEM_DARWIN
3549 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3550 so we don't need to import that one. */
3551 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3552#endif
3553 vmxHCImportGuestTr(pVCpu);
3554 }
3555 }
3556
3557 if (fWhat & CPUMCTX_EXTRN_DR7)
3558 {
3559#ifndef IN_NEM_DARWIN
3560 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3561#endif
3562 {
3563 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3564 AssertRC(rc);
3565 }
3566 }
3567
3568 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3569 {
3570 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3571 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3572 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3573 pCtx->SysEnter.cs = u32Val;
3574 }
3575
3576#ifndef IN_NEM_DARWIN
3577 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3578 {
3579 if ( pVM->hmr0.s.fAllow64BitGuests
3580 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3581 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3582 }
3583
3584 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3585 {
3586 if ( pVM->hmr0.s.fAllow64BitGuests
3587 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3588 {
3589 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3590 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3591 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3592 }
3593 }
3594
3595 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3596 {
3597 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3598 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3599 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3600 Assert(pMsrs);
3601 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3602 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3603 for (uint32_t i = 0; i < cMsrs; i++)
3604 {
3605 uint32_t const idMsr = pMsrs[i].u32Msr;
3606 switch (idMsr)
3607 {
3608 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3609 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3610 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3611 default:
3612 {
3613 uint32_t idxLbrMsr;
3614 if (VM_IS_VMX_LBR(pVM))
3615 {
3616 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3617 {
3618 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3619 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3620 break;
3621 }
3622 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3623 {
3624 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3625 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3626 break;
3627 }
3628 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3629 {
3630 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3631 break;
3632 }
3633 /* Fallthru (no break) */
3634 }
3635 pCtx->fExtrn = 0;
3636 VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr;
3637 ASMSetFlags(fEFlags);
3638 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3639 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3640 }
3641 }
3642 }
3643 }
3644#endif
3645
3646 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3647 {
3648 if (fWhat & CPUMCTX_EXTRN_CR0)
3649 {
3650 uint64_t u64Cr0;
3651 uint64_t u64Shadow;
3652 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3653 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3654#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3655 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3656 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3657#else
3658 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3659 {
3660 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3661 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3662 }
3663 else
3664 {
3665 /*
3666 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3667 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3668 * re-construct CR0. See @bugref{9180#c95} for details.
3669 */
3670 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3671 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3672 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3673 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3674 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3675 }
3676#endif
3677#ifndef IN_NEM_DARWIN
3678 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3679#endif
3680 CPUMSetGuestCR0(pVCpu, u64Cr0);
3681#ifndef IN_NEM_DARWIN
3682 VMMRZCallRing3Enable(pVCpu);
3683#endif
3684 }
3685
3686 if (fWhat & CPUMCTX_EXTRN_CR4)
3687 {
3688 uint64_t u64Cr4;
3689 uint64_t u64Shadow;
3690 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3691 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3692#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3693 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3694 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3695#else
3696 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3697 {
3698 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3699 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3700 }
3701 else
3702 {
3703 /*
3704 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3705 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3706 * re-construct CR4. See @bugref{9180#c95} for details.
3707 */
3708 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3709 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3710 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3711 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3712 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3713 }
3714#endif
3715 pCtx->cr4 = u64Cr4;
3716 }
3717
3718 if (fWhat & CPUMCTX_EXTRN_CR3)
3719 {
3720 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3721 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3722 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3723 && CPUMIsGuestPagingEnabledEx(pCtx)))
3724 {
3725 uint64_t u64Cr3;
3726 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3727 if (pCtx->cr3 != u64Cr3)
3728 {
3729 pCtx->cr3 = u64Cr3;
3730 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3731 }
3732
3733 /*
3734 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3735 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3736 */
3737 if (CPUMIsGuestInPAEModeEx(pCtx))
3738 {
3739 X86PDPE aPaePdpes[4];
3740 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3741 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3742 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3743 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3744 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3745 {
3746 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3747 /* PGM now updates PAE PDPTEs while updating CR3. */
3748 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3749 }
3750 }
3751 }
3752 }
3753 }
3754
3755#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3756 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3757 {
3758 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3759 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3760 {
3761 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3762 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3763 if (RT_SUCCESS(rc))
3764 { /* likely */ }
3765 else
3766 break;
3767 }
3768 }
3769#endif
3770 } while (0);
3771
3772 if (RT_SUCCESS(rc))
3773 {
3774 /* Update fExtrn. */
3775 pCtx->fExtrn &= ~fWhat;
3776
3777 /* If everything has been imported, clear the HM keeper bit. */
3778 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3779 {
3780#ifndef IN_NEM_DARWIN
3781 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3782#else
3783 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3784#endif
3785 Assert(!pCtx->fExtrn);
3786 }
3787 }
3788 }
3789#ifndef IN_NEM_DARWIN
3790 else
3791 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3792
3793 /*
3794 * Restore interrupts.
3795 */
3796 ASMSetFlags(fEFlags);
3797#endif
3798
3799 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3800
3801 if (RT_SUCCESS(rc))
3802 { /* likely */ }
3803 else
3804 return rc;
3805
3806 /*
3807 * Honor any pending CR3 updates.
3808 *
3809 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3810 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3811 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3812 *
3813 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
3814 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3815 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3816 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3817 *
3818 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3819 *
3820 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3821 */
3822 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3823#ifndef IN_NEM_DARWIN
3824 && VMMRZCallRing3IsEnabled(pVCpu)
3825#endif
3826 )
3827 {
3828 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3829 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3830 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3831 }
3832
3833 return VINF_SUCCESS;
3834}
3835
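/* Illustrative sketch (hypothetical caller, not part of the original source): an exit
   handler that only needs RIP and RFLAGS before deciding what to do would import just
   those, keeping the VMCS reads to a minimum:
       int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
       AssertRCReturn(rc, rc);
   Passing HMVMX_CPUMCTX_EXTRN_ALL instead imports everything this backend tracks. */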
3836
3837/**
3838 * Check per-VM and per-VCPU force flag actions that require us to go back to
3839 * ring-3 for one reason or another.
3840 *
3841 * @returns Strict VBox status code (i.e. informational status codes too)
3842 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3843 * ring-3.
3844 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3845 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3846 * interrupts)
3847 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3848 * all EMTs to be in ring-3.
3849 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3850 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3851 * to the EM loop.
3852 *
3853 * @param pVCpu The cross context virtual CPU structure.
3854 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
3855 * @param fStepping Whether we are single-stepping the guest using the
3856 * hypervisor debugger.
3857 *
3858 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
3859 * is no longer in VMX non-root mode.
3860 */
3861static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3862{
3863#ifndef IN_NEM_DARWIN
3864 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3865#endif
3866
3867 /*
3868 * Update pending interrupts into the APIC's IRR.
3869 */
3870 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3871 APICUpdatePendingInterrupts(pVCpu);
3872
3873 /*
3874 * Anything pending? Should be more likely than not if we're doing a good job.
3875 */
3876 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3877 if ( !fStepping
3878 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3879 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3880 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3881 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3882 return VINF_SUCCESS;
3883
3884 /* Pending PGM CR3 sync. */
3885 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3886 {
3887 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3888 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3889 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3890 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3891 if (rcStrict != VINF_SUCCESS)
3892 {
3893 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3894 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3895 return rcStrict;
3896 }
3897 }
3898
3899 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3900 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3901 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3902 {
3903 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3904 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3905 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3906 return rc;
3907 }
3908
3909 /* Pending VM request packets, such as hardware interrupts. */
3910 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3911 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3912 {
3913 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3914 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3915 return VINF_EM_PENDING_REQUEST;
3916 }
3917
3918 /* Pending PGM pool flushes. */
3919 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3920 {
3921 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3922 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3923 return VINF_PGM_POOL_FLUSH_PENDING;
3924 }
3925
3926 /* Pending DMA requests. */
3927 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3928 {
3929 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3930 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3931 return VINF_EM_RAW_TO_R3;
3932 }
3933
3934#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3935 /*
3936 * Pending nested-guest events.
3937 *
3938 * Please note the priority of these events are specified and important.
3939 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3940 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3941 */
3942 if (fIsNestedGuest)
3943 {
3944 /* Pending nested-guest APIC-write. */
3945 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3946 {
3947 Log4Func(("Pending nested-guest APIC-write\n"));
3948 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3949 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3950 return rcStrict;
3951 }
3952
3953 /* Pending nested-guest monitor-trap flag (MTF). */
3954 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3955 {
3956 Log4Func(("Pending nested-guest MTF\n"));
3957 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3958 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3959 return rcStrict;
3960 }
3961
3962 /* Pending nested-guest VMX-preemption timer expired. */
3963 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3964 {
3965 Log4Func(("Pending nested-guest preempt timer\n"));
3966 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3967 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3968 return rcStrict;
3969 }
3970 }
3971#else
3972 NOREF(fIsNestedGuest);
3973#endif
3974
3975 return VINF_SUCCESS;
3976}
3977
3978
3979/**
3980 * Converts any TRPM trap into a pending HM event. This is typically used when
3981 * entering from ring-3 (not longjmp returns).
3982 *
3983 * @param pVCpu The cross context virtual CPU structure.
3984 */
3985static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3986{
3987 Assert(TRPMHasTrap(pVCpu));
3988 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3989
3990 uint8_t uVector;
3991 TRPMEVENT enmTrpmEvent;
3992 uint32_t uErrCode;
3993 RTGCUINTPTR GCPtrFaultAddress;
3994 uint8_t cbInstr;
3995 bool fIcebp;
3996
3997 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
3998 AssertRC(rc);
3999
4000 uint32_t u32IntInfo;
4001 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4002 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4003
4004 rc = TRPMResetTrap(pVCpu);
4005 AssertRC(rc);
4006 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4007 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4008
4009 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4010}
4011
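/* Note: this helper and vmxHCPendingEventToTrpmTrap() below are the two halves of the
   TRPM<->HM hand-off: a trap queued by ring-3 (TRPM) is converted into the single pending
   HM event on the way into the execution loop, and an event we could not deliver is
   converted back into a TRPM trap before returning to ring-3. */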
4012
4013/**
4014 * Converts the pending HM event into a TRPM trap.
4015 *
4016 * @param pVCpu The cross context virtual CPU structure.
4017 */
4018static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4019{
4020 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4021
4022 /* If a trap was already pending, we did something wrong! */
4023 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4024
4025 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4026 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4027 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4028
4029 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4030
4031 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4032 AssertRC(rc);
4033
4034 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4035 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4036
4037 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4038 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4039 else
4040 {
4041 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4042 switch (uVectorType)
4043 {
4044 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4045 TRPMSetTrapDueToIcebp(pVCpu);
4046 RT_FALL_THRU();
4047 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4048 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4049 {
4050 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4051 || ( uVector == X86_XCPT_BP /* INT3 */
4052 || uVector == X86_XCPT_OF /* INTO */
4053 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4054 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4055 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4056 break;
4057 }
4058 }
4059 }
4060
4061 /* We're now done converting the pending event. */
4062 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4063}
4064
4065
4066/**
4067 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4068 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4069 *
4070 * @param pVCpu The cross context virtual CPU structure.
4071 * @param pVmcsInfo The VMCS info. object.
4072 */
4073static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4074{
4075 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4076 {
4077 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4078 {
4079 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4080 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4081 AssertRC(rc);
4082 }
4083 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4084}
4085
4086
4087/**
4088 * Clears the interrupt-window exiting control in the VMCS.
4089 *
4090 * @param pVCpu The cross context virtual CPU structure.
4091 * @param pVmcsInfo The VMCS info. object.
4092 */
4093DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4094{
4095 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4096 {
4097 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4098 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4099 AssertRC(rc);
4100 }
4101}
4102
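/* Usage pattern (sketch, based on the doc comments above): when an interrupt is pending
   but the guest cannot take it yet, vmxHCSetIntWindowExitVmcs() arms the control so the
   CPU exits as soon as the guest becomes interruptible; after the interrupt has been
   injected, vmxHCClearIntWindowExitVmcs() disarms it again to avoid spurious VM-exits. */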
4103
4104/**
4105 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4106 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4107 *
4108 * @param pVCpu The cross context virtual CPU structure.
4109 * @param pVmcsInfo The VMCS info. object.
4110 */
4111static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4112{
4113 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4114 {
4115 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4116 {
4117 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4118 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4119 AssertRC(rc);
4120 Log4Func(("Setup NMI-window exiting\n"));
4121 }
4122 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4123}
4124
4125
4126/**
4127 * Clears the NMI-window exiting control in the VMCS.
4128 *
4129 * @param pVCpu The cross context virtual CPU structure.
4130 * @param pVmcsInfo The VMCS info. object.
4131 */
4132DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4133{
4134 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4135 {
4136 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4137 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4138 AssertRC(rc);
4139 }
4140}
4141
4142
4143/**
4144 * Injects an event into the guest upon VM-entry by updating the relevant fields
4145 * in the VM-entry area in the VMCS.
4146 *
4147 * @returns Strict VBox status code (i.e. informational status codes too).
4148 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4149 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4150 *
4151 * @param pVCpu The cross context virtual CPU structure.
4152 * @param pVmcsInfo The VMCS info. object.
 * @param fIsNestedGuest Flag whether the event is injected into a nested guest.
4153 * @param pEvent The event being injected.
4154 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4155 * will be updated if necessary. This cannot be NULL.
4156 * @param fStepping Whether we're single-stepping guest execution and should
4157 * return VINF_EM_DBG_STEPPED if the event is injected
4158 * directly (registers modified by us, not by hardware on
4159 * VM-entry).
4160 */
4161static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
4162 uint32_t *pfIntrState)
4163{
4164 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4165 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4166 Assert(pfIntrState);
4167
4168#ifdef IN_NEM_DARWIN
4169 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4170#endif
4171
4172 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4173 uint32_t u32IntInfo = pEvent->u64IntInfo;
4174 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4175 uint32_t const cbInstr = pEvent->cbInstr;
4176 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4177 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4178 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4179
4180#ifdef VBOX_STRICT
4181 /*
4182 * Validate the error-code-valid bit for hardware exceptions.
4183 * No error codes for exceptions in real-mode.
4184 *
4185 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4186 */
4187 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4188 && !CPUMIsGuestInRealModeEx(pCtx))
4189 {
4190 switch (uVector)
4191 {
4192 case X86_XCPT_PF:
4193 case X86_XCPT_DF:
4194 case X86_XCPT_TS:
4195 case X86_XCPT_NP:
4196 case X86_XCPT_SS:
4197 case X86_XCPT_GP:
4198 case X86_XCPT_AC:
4199 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4200 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4201 RT_FALL_THRU();
4202 default:
4203 break;
4204 }
4205 }
4206
4207 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4208 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4209 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4210#endif
4211
4212 RT_NOREF(uVector);
4213 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4214 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4215 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4216 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4217 {
4218 Assert(uVector <= X86_XCPT_LAST);
4219 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4220 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4221 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4222 }
4223 else
4224 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4225
4226 /*
4227 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4228 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4229 * interrupt handler in the (real-mode) guest.
4230 *
4231 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4232 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4233 */
4234 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4235 {
4236#ifndef IN_NEM_DARWIN
4237 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4238#endif
4239 {
4240 /*
4241 * For CPUs with unrestricted guest execution enabled and with the guest
4242 * in real-mode, we must not set the deliver-error-code bit.
4243 *
4244 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4245 */
4246 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4247 }
4248#ifndef IN_NEM_DARWIN
4249 else
4250 {
4251 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4252 Assert(PDMVmmDevHeapIsEnabled(pVM));
4253 Assert(pVM->hm.s.vmx.pRealModeTSS);
4254 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4255
4256 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4257 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4258 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4259 AssertRCReturn(rc2, rc2);
4260
4261 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4262 size_t const cbIdtEntry = sizeof(X86IDTR16);
4263 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4264 {
4265 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4266 if (uVector == X86_XCPT_DF)
4267 return VINF_EM_RESET;
4268
4269 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4270 No error codes for exceptions in real-mode. */
4271 if (uVector == X86_XCPT_GP)
4272 {
4273 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4274 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4275 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4276 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4277 HMEVENT EventXcptDf;
4278 RT_ZERO(EventXcptDf);
4279 EventXcptDf.u64IntInfo = uXcptDfInfo;
4280 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4281 }
4282
4283 /*
4284 * If we're injecting an event with no valid IDT entry, inject a #GP.
4285 * No error codes for exceptions in real-mode.
4286 *
4287 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4288 */
4289 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4290 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4291 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4292 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4293 HMEVENT EventXcptGp;
4294 RT_ZERO(EventXcptGp);
4295 EventXcptGp.u64IntInfo = uXcptGpInfo;
4296 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4297 }
4298
4299 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4300 uint16_t uGuestIp = pCtx->ip;
4301 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4302 {
4303 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4304 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4305 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4306 }
4307 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4308 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4309
4310 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4311 X86IDTR16 IdtEntry;
4312 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4313 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4314 AssertRCReturn(rc2, rc2);
4315
4316 /* Construct the stack frame for the interrupt/exception handler. */
4317 VBOXSTRICTRC rcStrict;
4318 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4319 if (rcStrict == VINF_SUCCESS)
4320 {
4321 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4322 if (rcStrict == VINF_SUCCESS)
4323 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4324 }
4325
4326 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4327 if (rcStrict == VINF_SUCCESS)
4328 {
4329 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4330 pCtx->rip = IdtEntry.offSel;
4331 pCtx->cs.Sel = IdtEntry.uSel;
4332 pCtx->cs.ValidSel = IdtEntry.uSel;
4333 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4334 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4335 && uVector == X86_XCPT_PF)
4336 pCtx->cr2 = GCPtrFault;
4337
4338 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4339 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4340 | HM_CHANGED_GUEST_RSP);
4341
4342 /*
4343 * If we delivered a hardware exception (other than an NMI) and if there was
4344 * block-by-STI in effect, we should clear it.
4345 */
4346 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4347 {
4348 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4349 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4350 Log4Func(("Clearing inhibition due to STI\n"));
4351 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4352 }
4353
4354 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4355 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4356
4357 /*
4358 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4359 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4360 */
4361 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4362
4363 /*
4364 * If we eventually support nested-guest execution without unrestricted guest execution,
4365 * we should set fInterceptEvents here.
4366 */
4367 Assert(!fIsNestedGuest);
4368
4369 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4370 if (fStepping)
4371 rcStrict = VINF_EM_DBG_STEPPED;
4372 }
4373 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4374 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4375 return rcStrict;
4376 }
4377#else
4378 RT_NOREF(pVmcsInfo);
4379#endif
4380 }
4381
4382 /*
4383 * Validate.
4384 */
4385 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4386 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4387
4388 /*
4389 * Inject the event into the VMCS.
4390 */
4391 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4392 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4393 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4394 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4395 AssertRC(rc);
4396
4397 /*
4398 * Update guest CR2 if this is a page-fault.
4399 */
4400 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4401 pCtx->cr2 = GCPtrFault;
4402
4403 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4404 return VINF_SUCCESS;
4405}
4406
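/* Note on the real-mode path above: without unrestricted guest execution the event cannot
   be injected through the VM-entry fields for a guest in (emulated) real mode, so it is
   delivered by hand: FLAGS, CS and the return IP are pushed onto the guest stack, the
   vector is looked up in the real-mode IVT at IDTR.base, and CS:RIP is redirected to the
   handler, mirroring what the CPU itself would do for a real-mode interrupt. */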
4407
4408/**
4409 * Evaluates the event to be delivered to the guest and sets it as the pending
4410 * event.
4411 *
4412 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4413 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4414 * NOT restore these force-flags.
4415 *
4416 * @returns Strict VBox status code (i.e. informational status codes too).
4417 * @param pVCpu The cross context virtual CPU structure.
4418 * @param pVmcsInfo The VMCS information structure.
4419 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4420 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4421 */
4422static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4423{
4424 Assert(pfIntrState);
4425 Assert(!TRPMHasTrap(pVCpu));
4426
4427 /*
4428 * Compute/update guest-interruptibility state related FFs.
4429 * The FFs will be used below while evaluating events to be injected.
4430 */
4431 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
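    /* Note: vmxHCGetGuestIntrStateAndUpdateFFs() returns the VMCS guest-interruptibility
       bits (block-by-STI / MOV SS / SMI / NMI) and keeps the related force-flags
       (VMCPU_FF_INHIBIT_INTERRUPTS, VMCPU_FF_BLOCK_NMIS) in sync; both are consumed below. */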
4432
4433 /*
4434 * Evaluate if a new event needs to be injected.
4435 * An event that's already pending has already undergone all the necessary checks.
4436 */
4437 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4438 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4439 {
4440 /** @todo SMI. SMIs take priority over NMIs. */
4441
4442 /*
4443 * NMIs.
4444 * NMIs take priority over external interrupts.
4445 */
4446#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4447 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4448#endif
4449 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4450 {
4451 /*
4452 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4453 *
4454 * For a nested-guest, the FF always indicates the outer guest's ability to
4455 * receive an NMI while the guest-interruptibility state bit depends on whether
4456 * the nested-hypervisor is using virtual-NMIs.
4457 */
4458 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4459 {
4460#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4461 if ( fIsNestedGuest
4462 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4463 return IEMExecVmxVmexitXcptNmi(pVCpu);
4464#endif
4465 vmxHCSetPendingXcptNmi(pVCpu);
4466 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4467 Log4Func(("NMI pending injection\n"));
4468
4469 /* We've injected the NMI, bail. */
4470 return VINF_SUCCESS;
4471 }
4472 else if (!fIsNestedGuest)
4473 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
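            /* Note: NMI-window exiting should cause a VM-exit as soon as the guest leaves the
               NMI-blocking state, at which point this function runs again and can inject the
               still-pending NMI (VMCPU_FF_INTERRUPT_NMI is deliberately left set here). */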
4474 }
4475
4476 /*
4477 * External interrupts (PIC/APIC).
4478 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4479 * We cannot re-request the interrupt from the controller again.
4480 */
4481 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4482 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4483 {
4484 Assert(!DBGFIsStepping(pVCpu));
4485 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4486 AssertRC(rc);
4487
4488 /*
4489 * We must not check EFLAGS directly when executing a nested-guest; use
4490 * CPUMIsGuestPhysIntrEnabled() instead, as EFLAGS.IF does not control the blocking of
4491 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4492 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4493 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4494 *
4495 * See Intel spec. 25.4.1 "Event Blocking".
4496 */
4497 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4498 {
4499#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4500 if ( fIsNestedGuest
4501 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4502 {
4503 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4504 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4505 return rcStrict;
4506 }
4507#endif
4508 uint8_t u8Interrupt;
4509 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4510 if (RT_SUCCESS(rc))
4511 {
4512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4513 if ( fIsNestedGuest
4514 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4515 {
4516 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4517 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4518 return rcStrict;
4519 }
4520#endif
4521 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4522 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4523 }
4524 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4525 {
4526 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4527
4528 if ( !fIsNestedGuest
4529 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4530 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4531 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
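                /* Note: u8Interrupt >> 4 is the interrupt's priority class; using it as the TPR
                   threshold should yield a TPR-below-threshold VM-exit once the guest lowers its
                   TPR far enough for this interrupt to become deliverable. */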
4532
4533 /*
4534 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4535 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4536 * need to re-set this force-flag here.
4537 */
4538 }
4539 else
4540 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4541
4542 /* We've injected the interrupt or taken necessary action, bail. */
4543 return VINF_SUCCESS;
4544 }
4545 if (!fIsNestedGuest)
4546 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
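            /* Note: interrupt-window exiting causes a VM-exit the moment RFLAGS.IF and the
               interruptibility state permit interrupt delivery, so the pending PIC/APIC
               interrupt is re-evaluated here at that point. */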
4547 }
4548 }
4549 else if (!fIsNestedGuest)
4550 {
4551 /*
4552 * An event is being injected or we are in an interrupt shadow. Check if another event is
4553 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4554 * the pending event.
4555 */
4556 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4557 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4558 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4559 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4560 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4561 }
4562 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4563
4564 return VINF_SUCCESS;
4565}
4566
4567
4568/**
4569 * Injects any pending events into the guest if the guest is in a state to
4570 * receive them.
4571 *
4572 * @returns Strict VBox status code (i.e. informational status codes too).
4573 * @param pVCpu The cross context virtual CPU structure.
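 * @param pVmcsInfo The VMCS information structure.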
4574 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4575 * @param fIntrState The VT-x guest-interruptibility state.
4576 * @param fStepping Whether we are single-stepping the guest using the
4577 * hypervisor debugger and should return
4578 * VINF_EM_DBG_STEPPED if the event was dispatched
4579 * directly.
4580 */
4581static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
4582{
4583 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4584#ifndef IN_NEM_DARWIN
4585 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4586#endif
4587
4588#ifdef VBOX_STRICT
4589 /*
4590 * Verify guest-interruptibility state.
4591 *
4592 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4593 * since injecting an event may modify the interruptibility state and we must thus always
4594 * use fIntrState.
4595 */
4596 {
4597 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4598 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4599 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4600 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4601 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4602 Assert(!TRPMHasTrap(pVCpu));
4603 NOREF(fBlockMovSS); NOREF(fBlockSti);
4604 }
4605#endif
4606
4607 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4608 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4609 {
4610 /*
4611 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4612 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4613 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4614 *
4615 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4616 */
4617 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4618#ifdef VBOX_STRICT
4619 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4620 {
4621 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4622 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4623 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4624 }
4625 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4626 {
4627 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4628 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4629 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4630 }
4631#endif
4632 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4633 uIntType));
4634
4635 /*
4636 * Inject the event and get any changes to the guest-interruptibility state.
4637 *
4638 * The guest-interruptibility state may need to be updated if we inject the event
4639 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4640 */
4641 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4642 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4643
4644 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4645 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4646 else
4647 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4648 }
4649
4650 /*
4651 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4652 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4653 */
4654 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4655 && !fIsNestedGuest)
4656 {
4657 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4658
4659 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4660 {
4661 /*
4662 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4663 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4664 */
4665 Assert(!DBGFIsStepping(pVCpu));
4666 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4667 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4668 AssertRC(rc);
4669 }
4670 else
4671 {
4672 /*
4673 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4674 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4675 * we take care of this case in vmxHCExportSharedDebugState, as well as the case where
4676 * we use MTF, so just make sure it's called before executing guest-code.
4677 */
4678 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4679 }
4680 }
4681 /* else: for nested-guests this is currently handled while merging controls. */
4682
4683 /*
4684 * Finally, update the guest-interruptibility state.
4685 *
4686 * This is required for the real-on-v86 software interrupt injection, for
4687 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4688 */
4689 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4690 AssertRC(rc);
4691
4692 /*
4693 * There's no need to clear the VM-entry interruption-information field here if we're not
4694 * injecting anything. VT-x clears the valid bit on every VM-exit.
4695 *
4696 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4697 */
4698
4699 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4700 return rcStrict;
4701}
4702
4703
4704/**
4705 * Tries to determine what part of the guest-state VT-x has deemed invalid
4706 * and updates the error record fields accordingly.
4707 *
4708 * @returns VMX_IGS_* error codes.
4709 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4710 * wrong with the guest state.
4711 *
4712 * @param pVCpu The cross context virtual CPU structure.
4713 * @param pVmcsInfo The VMCS info. object.
4714 *
4715 * @remarks This function assumes our cache of the VMCS controls
4716 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4717 */
4718static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4719{
4720#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4721#define HMVMX_CHECK_BREAK(expr, err) do { \
4722 if (!(expr)) { uError = (err); break; } \
4723 } while (0)
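/* Note: each HMVMX_ERROR_BREAK/HMVMX_CHECK_BREAK records the first failing VMX_IGS_*
   diagnostic in uError and breaks out of the do { ... } while (0) scan below, so only
   the first inconsistency found is reported. */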
4724
4725 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4726 uint32_t uError = VMX_IGS_ERROR;
4727 uint32_t u32IntrState = 0;
4728#ifndef IN_NEM_DARWIN
4729 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4730 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4731#else
4732 bool const fUnrestrictedGuest = true;
4733#endif
4734 do
4735 {
4736 int rc;
4737
4738 /*
4739 * Guest-interruptibility state.
4740 *
4741 * Read this first so that even if a check that does not itself require the
4742 * guest-interruptibility state fails early, the recorded value still reflects
4743 * the correct VMCS contents and avoids causing further confusion.
4744 */
4745 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4746 AssertRC(rc);
4747
4748 uint32_t u32Val;
4749 uint64_t u64Val;
4750
4751 /*
4752 * CR0.
4753 */
4754 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4755 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4756 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
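        /* Note: a bit that is 1 in the CR0 FIXED0 MSR must be 1 in CR0 and a bit that is 0 in
           the CR0 FIXED1 MSR must be 0; hence fSetCr0 (= FIXED0 & FIXED1) is the mask of bits
           that must be set and fZapCr0 (= FIXED0 | FIXED1) the mask of bits allowed to be set.
           The same scheme applies to the CR4 fixed MSRs below. */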
4757 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4758 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4759 if (fUnrestrictedGuest)
4760 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4761
4762 uint64_t u64GuestCr0;
4763 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4764 AssertRC(rc);
4765 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4766 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4767 if ( !fUnrestrictedGuest
4768 && (u64GuestCr0 & X86_CR0_PG)
4769 && !(u64GuestCr0 & X86_CR0_PE))
4770 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4771
4772 /*
4773 * CR4.
4774 */
4775 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4776 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4777 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4778
4779 uint64_t u64GuestCr4;
4780 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4781 AssertRC(rc);
4782 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4783 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4784
4785 /*
4786 * IA32_DEBUGCTL MSR.
4787 */
4788 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4789 AssertRC(rc);
4790 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4791 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4792 {
4793 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4794 }
4795 uint64_t u64DebugCtlMsr = u64Val;
4796
4797#ifdef VBOX_STRICT
4798 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4799 AssertRC(rc);
4800 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4801#endif
4802 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4803
4804 /*
4805 * RIP and RFLAGS.
4806 */
4807 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4808 AssertRC(rc);
4809 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4810 if ( !fLongModeGuest
4811 || !pCtx->cs.Attr.n.u1Long)
4812 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4813 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4814 * must be identical if the "IA-32e mode guest" VM-entry
4815 * control is 1 and CS.L is 1. No check applies if the
4816 * CPU supports 64 linear-address bits. */
4817
4818 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4819 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4820 AssertRC(rc);
4821 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4822 VMX_IGS_RFLAGS_RESERVED);
4823 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4824 uint32_t const u32Eflags = u64Val;
4825
4826 if ( fLongModeGuest
4827 || ( fUnrestrictedGuest
4828 && !(u64GuestCr0 & X86_CR0_PE)))
4829 {
4830 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4831 }
4832
4833 uint32_t u32EntryInfo;
4834 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4835 AssertRC(rc);
4836 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4837 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4838
4839 /*
4840 * 64-bit checks.
4841 */
4842 if (fLongModeGuest)
4843 {
4844 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4845 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4846 }
4847
4848 if ( !fLongModeGuest
4849 && (u64GuestCr4 & X86_CR4_PCIDE))
4850 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4851
4852 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4853 * 51:32 beyond the processor's physical-address width are 0. */
4854
4855 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4856 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4857 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4858
4859#ifndef IN_NEM_DARWIN
4860 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4861 AssertRC(rc);
4862 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4863
4864 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4865 AssertRC(rc);
4866 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4867#endif
4868
4869 /*
4870 * PERF_GLOBAL MSR.
4871 */
4872 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4873 {
4874 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4875 AssertRC(rc);
4876 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4877 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4878 }
4879
4880 /*
4881 * PAT MSR.
4882 */
4883 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4884 {
4885 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4886 AssertRC(rc);
4887 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4888 for (unsigned i = 0; i < 8; i++)
4889 {
4890 uint8_t u8Val = (u64Val & 0xff);
4891 if ( u8Val != 0 /* UC */
4892 && u8Val != 1 /* WC */
4893 && u8Val != 4 /* WT */
4894 && u8Val != 5 /* WP */
4895 && u8Val != 6 /* WB */
4896 && u8Val != 7 /* UC- */)
4897 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4898 u64Val >>= 8;
4899 }
4900 }
4901
4902 /*
4903 * EFER MSR.
4904 */
4905 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4906 {
4907 Assert(g_fHmVmxSupportsVmcsEfer);
4908 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4909 AssertRC(rc);
4910 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4911 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4912 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4913 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4914 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4915 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4916 * iemVmxVmentryCheckGuestState(). */
4917 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4918 || !(u64GuestCr0 & X86_CR0_PG)
4919 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4920 VMX_IGS_EFER_LMA_LME_MISMATCH);
4921 }
4922
4923 /*
4924 * Segment registers.
4925 */
4926 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4927 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4928 if (!(u32Eflags & X86_EFL_VM))
4929 {
4930 /* CS */
4931 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4932 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4933 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4934 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4935 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4936 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4937 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4938 /* CS cannot be loaded with NULL in protected mode. */
4939 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4940 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4941 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4942 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4943 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4944 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4945 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4946 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4947 else
4948 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4949
4950 /* SS */
4951 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4952 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4953 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4954 if ( !(pCtx->cr0 & X86_CR0_PE)
4955 || pCtx->cs.Attr.n.u4Type == 3)
4956 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4957
4958 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4959 {
4960 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4961 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4962 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4963 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4964 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4965 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4966 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4967 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4968 }
4969
4970 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4971 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4972 {
4973 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4974 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4975 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4976 || pCtx->ds.Attr.n.u4Type > 11
4977 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4978 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4979 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4980 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4981 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4982 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4983 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4984 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4985 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4986 }
4987 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4988 {
4989 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4990 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4991 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4992 || pCtx->es.Attr.n.u4Type > 11
4993 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4994 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4995 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
4996 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
4997 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
4998 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
4999 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5000 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5001 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5002 }
5003 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5004 {
5005 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5006 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5007 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5008 || pCtx->fs.Attr.n.u4Type > 11
5009 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5010 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5011 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5012 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5013 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5014 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5015 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5016 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5017 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5018 }
5019 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5020 {
5021 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5022 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5023 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5024 || pCtx->gs.Attr.n.u4Type > 11
5025 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5026 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5027 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5028 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5029 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5030 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5031 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5032 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5033 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5034 }
5035 /* 64-bit capable CPUs. */
5036 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5037 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5038 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5039 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5040 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5041 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5042 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5043 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5044 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5045 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5046 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5047 }
5048 else
5049 {
5050 /* V86 mode checks. */
5051 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
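            /* Note: with real-on-v86 active the segments are exported with 0xf3 attributes
               (present, DPL 3, read/write accessed data), so validate against those rather
               than the guest's own attribute values. */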
5052 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5053 {
5054 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5055 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5056 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5057 }
5058 else
5059 {
5060 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5061 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5062 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5063 }
5064
5065 /* CS */
5066 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5067 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5068 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5069 /* SS */
5070 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5071 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5072 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5073 /* DS */
5074 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5075 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5076 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5077 /* ES */
5078 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5079 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5080 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5081 /* FS */
5082 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5083 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5084 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5085 /* GS */
5086 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5087 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5088 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5089 /* 64-bit capable CPUs. */
5090 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5091 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5092 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5093 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5094 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5095 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5096 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5097 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5098 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5099 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5100 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5101 }
5102
5103 /*
5104 * TR.
5105 */
5106 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5107 /* 64-bit capable CPUs. */
5108 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5109 if (fLongModeGuest)
5110 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5111 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5112 else
5113 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5114 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5115 VMX_IGS_TR_ATTR_TYPE_INVALID);
5116 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5117 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5118 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5119 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5120 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5121 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5122 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5123 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5124
5125 /*
5126 * GDTR and IDTR (64-bit capable checks).
5127 */
5128 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5129 AssertRC(rc);
5130 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5131
5132 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5133 AssertRC(rc);
5134 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5135
5136 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5137 AssertRC(rc);
5138 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5139
5140 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5141 AssertRC(rc);
5142 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5143
5144 /*
5145 * Guest Non-Register State.
5146 */
5147 /* Activity State. */
5148 uint32_t u32ActivityState;
5149 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5150 AssertRC(rc);
5151 HMVMX_CHECK_BREAK( !u32ActivityState
5152 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5153 VMX_IGS_ACTIVITY_STATE_INVALID);
5154 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5155 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5156
5157 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5158 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5159 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5160
5161 /** @todo Activity state and injecting interrupts. Left as a todo since we
5162 * currently don't use any activity state other than ACTIVE. */
5163
5164 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5165 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5166
5167 /* Guest interruptibility-state. */
5168 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5169 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5170 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5171 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5172 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5173 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5174 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5175 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5176 {
5177 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5178 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5179 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5180 }
5181 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5182 {
5183 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5184 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5185 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5186 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5187 }
5188 /** @todo Assumes the processor is not in SMM. */
5189 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5190 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5191 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5192 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5193 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5194 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5195 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5196 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5197
5198 /* Pending debug exceptions. */
5199 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5200 AssertRC(rc);
5201 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5202 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5203 u32Val = u64Val; /* For pending debug exceptions checks below. */
5204
5205 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5206 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5207 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5208 {
5209 if ( (u32Eflags & X86_EFL_TF)
5210 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5211 {
5212 /* Bit 14 is PendingDebug.BS. */
5213 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5214 }
5215 if ( !(u32Eflags & X86_EFL_TF)
5216 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5217 {
5218 /* Bit 14 is PendingDebug.BS. */
5219 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5220 }
5221 }
5222
5223#ifndef IN_NEM_DARWIN
5224 /* VMCS link pointer. */
5225 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5226 AssertRC(rc);
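        /* Note: a VMCS link pointer of ~0 means no shadow VMCS is in use; anything else is
           expected to point at our own shadow VMCS, whose first dword holds the VMCS revision
           ID with the shadow-VMCS indicator in its top bit. */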
5227 if (u64Val != UINT64_C(0xffffffffffffffff))
5228 {
5229 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5230 /** @todo Bits beyond the processor's physical-address width MBZ. */
5231 /** @todo SMM checks. */
5232 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5233 Assert(pVmcsInfo->pvShadowVmcs);
5234 VMXVMCSREVID VmcsRevId;
5235 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5236 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5237 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5238 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5239 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5240 }
5241
5242 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5243 * not using nested paging? */
5244 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5245 && !fLongModeGuest
5246 && CPUMIsGuestInPAEModeEx(pCtx))
5247 {
5248 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5249 AssertRC(rc);
5250 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5251
5252 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5253 AssertRC(rc);
5254 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5255
5256 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5257 AssertRC(rc);
5258 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5259
5260 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5261 AssertRC(rc);
5262 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5263 }
5264#endif
5265
5266 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5267 if (uError == VMX_IGS_ERROR)
5268 uError = VMX_IGS_REASON_NOT_FOUND;
5269 } while (0);
5270
5271 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5272 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5273 return uError;
5274
5275#undef HMVMX_ERROR_BREAK
5276#undef HMVMX_CHECK_BREAK
5277}
5278/** @} */
5279
5280
5281#ifndef HMVMX_USE_FUNCTION_TABLE
5282/**
5283 * Handles a guest VM-exit from hardware-assisted VMX execution.
5284 *
5285 * @returns Strict VBox status code (i.e. informational status codes too).
5286 * @param pVCpu The cross context virtual CPU structure.
5287 * @param pVmxTransient The VMX-transient structure.
5288 */
5289DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5290{
5291#ifdef DEBUG_ramshankar
5292# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5293 do { \
5294 if (a_fSave != 0) \
5295 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5296 VBOXSTRICTRC rcStrict = a_CallExpr; \
5297 if (a_fSave != 0) \
5298 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5299 return rcStrict; \
5300 } while (0)
5301#else
5302# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5303#endif
5304 uint32_t const uExitReason = pVmxTransient->uExitReason;
5305 switch (uExitReason)
5306 {
5307 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5308 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5309 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5310 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5311 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5312 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5313 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5314 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5315 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5316 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5317 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5318 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5319 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5320 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5321 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5322 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5323 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5324 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5325 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5326 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5327 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5328 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5329 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5330 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5331 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5332 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5333 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5334 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5335 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5336 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5337#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5338 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5339 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5340 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5341 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5342 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5343 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5344 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5345 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5346 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5347 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5348#else
5349 case VMX_EXIT_VMCLEAR:
5350 case VMX_EXIT_VMLAUNCH:
5351 case VMX_EXIT_VMPTRLD:
5352 case VMX_EXIT_VMPTRST:
5353 case VMX_EXIT_VMREAD:
5354 case VMX_EXIT_VMRESUME:
5355 case VMX_EXIT_VMWRITE:
5356 case VMX_EXIT_VMXOFF:
5357 case VMX_EXIT_VMXON:
5358 case VMX_EXIT_INVVPID:
5359 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5360#endif
5361#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5362 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5363#else
5364 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5365#endif
5366
5367 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5368 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5369 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5370
5371 case VMX_EXIT_INIT_SIGNAL:
5372 case VMX_EXIT_SIPI:
5373 case VMX_EXIT_IO_SMI:
5374 case VMX_EXIT_SMI:
5375 case VMX_EXIT_ERR_MSR_LOAD:
5376 case VMX_EXIT_ERR_MACHINE_CHECK:
5377 case VMX_EXIT_PML_FULL:
5378 case VMX_EXIT_VIRTUALIZED_EOI:
5379 case VMX_EXIT_GDTR_IDTR_ACCESS:
5380 case VMX_EXIT_LDTR_TR_ACCESS:
5381 case VMX_EXIT_APIC_WRITE:
5382 case VMX_EXIT_RDRAND:
5383 case VMX_EXIT_RSM:
5384 case VMX_EXIT_VMFUNC:
5385 case VMX_EXIT_ENCLS:
5386 case VMX_EXIT_RDSEED:
5387 case VMX_EXIT_XSAVES:
5388 case VMX_EXIT_XRSTORS:
5389 case VMX_EXIT_UMWAIT:
5390 case VMX_EXIT_TPAUSE:
5391 case VMX_EXIT_LOADIWKEY:
5392 default:
5393 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5394 }
5395#undef VMEXIT_CALL_RET
5396}
5397#endif /* !HMVMX_USE_FUNCTION_TABLE */
5398
5399
5400#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5401/**
5402 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5403 *
5404 * @returns Strict VBox status code (i.e. informational status codes too).
5405 * @param pVCpu The cross context virtual CPU structure.
5406 * @param pVmxTransient The VMX-transient structure.
5407 */
5408DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5409{
5410 uint32_t const uExitReason = pVmxTransient->uExitReason;
5411 switch (uExitReason)
5412 {
5413 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
5414 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
5415 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5416 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5417 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5418
5419 /*
5420 * We shouldn't direct host physical interrupts to the nested-guest.
5421 */
5422 case VMX_EXIT_EXT_INT:
5423 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5424
5425 /*
5426 * Instructions that cause VM-exits unconditionally or where the condition is
5427 * taken solely from the nested hypervisor (meaning if the VM-exit
5428 * happens, it's guaranteed to be a nested-guest VM-exit).
5429 *
5430 * - Provides VM-exit instruction length ONLY.
5431 */
5432 case VMX_EXIT_CPUID: /* Unconditional. */
5433 case VMX_EXIT_VMCALL:
5434 case VMX_EXIT_GETSEC:
5435 case VMX_EXIT_INVD:
5436 case VMX_EXIT_XSETBV:
5437 case VMX_EXIT_VMLAUNCH:
5438 case VMX_EXIT_VMRESUME:
5439 case VMX_EXIT_VMXOFF:
5440 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5441 case VMX_EXIT_VMFUNC:
5442 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5443
5444 /*
5445 * Instructions that cause VM-exits unconditionally or where the condition is
5446 * taken solely from the nested hypervisor (meaning if the VM-exit
5447 * happens, it's guaranteed to be a nested-guest VM-exit).
5448 *
5449 * - Provides VM-exit instruction length.
5450 * - Provides VM-exit information.
5451 * - Optionally provides Exit qualification.
5452 *
5453 * Since Exit qualification is 0 for all VM-exits where it is not
5454 * applicable, reading and passing it to the guest should produce
5455 * defined behavior.
5456 *
5457 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5458 */
5459 case VMX_EXIT_INVEPT: /* Unconditional. */
5460 case VMX_EXIT_INVVPID:
5461 case VMX_EXIT_VMCLEAR:
5462 case VMX_EXIT_VMPTRLD:
5463 case VMX_EXIT_VMPTRST:
5464 case VMX_EXIT_VMXON:
5465 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5466 case VMX_EXIT_LDTR_TR_ACCESS:
5467 case VMX_EXIT_RDRAND:
5468 case VMX_EXIT_RDSEED:
5469 case VMX_EXIT_XSAVES:
5470 case VMX_EXIT_XRSTORS:
5471 case VMX_EXIT_UMWAIT:
5472 case VMX_EXIT_TPAUSE:
5473 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5474
5475 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5476 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5477 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5478 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5479 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5482 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5485 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5492 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5494
5495 case VMX_EXIT_PREEMPT_TIMER:
5496 {
5497 /** @todo NSTVMX: Preempt timer. */
5498 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5499 }
5500
5501 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5502 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5503
5504 case VMX_EXIT_VMREAD:
5505 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5506
5507 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5508 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5509
5510 case VMX_EXIT_INIT_SIGNAL:
5511 case VMX_EXIT_SIPI:
5512 case VMX_EXIT_IO_SMI:
5513 case VMX_EXIT_SMI:
5514 case VMX_EXIT_ERR_MSR_LOAD:
5515 case VMX_EXIT_ERR_MACHINE_CHECK:
5516 case VMX_EXIT_PML_FULL:
5517 case VMX_EXIT_RSM:
5518 default:
5519 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5520 }
5521}
5522#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5523
5524
5525/** @name VM-exit helpers.
5526 * @{
5527 */
5528/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5529/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5530/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5531
5532/** Macro for VM-exits called unexpectedly. */
5533#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5534 do { \
5535 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5536 return VERR_VMX_UNEXPECTED_EXIT; \
5537 } while (0)
5538
5539#ifdef VBOX_STRICT
5540# ifndef IN_NEM_DARWIN
5541/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5542# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5543 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5544
5545# define HMVMX_ASSERT_PREEMPT_CPUID() \
5546 do { \
5547 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5548 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5549 } while (0)
5550
5551# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5552 do { \
5553 AssertPtr((a_pVCpu)); \
5554 AssertPtr((a_pVmxTransient)); \
5555 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5556 Assert((a_pVmxTransient)->pVmcsInfo); \
5557 Assert(ASMIntAreEnabled()); \
5558 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5559 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5560 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5561 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5562 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5563 HMVMX_ASSERT_PREEMPT_CPUID(); \
5564 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5565 } while (0)
5566# else
5567# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5568# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5569# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5570 do { \
5571 AssertPtr((a_pVCpu)); \
5572 AssertPtr((a_pVmxTransient)); \
5573 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5574 Assert((a_pVmxTransient)->pVmcsInfo); \
5575 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5576 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5577 } while (0)
5578# endif
5579
5580# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5581 do { \
5582 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5583 Assert((a_pVmxTransient)->fIsNestedGuest); \
5584 } while (0)
5585
5586# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5587 do { \
5588 Log4Func(("\n")); \
5589 } while (0)
5590#else
5591# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5592 do { \
5593 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5594 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5595 } while (0)
5596
5597# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5598 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5599
5600# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5601#endif
5602
5603#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5604/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
5605 * guests that attempted to execute a VMX instruction. */
5606# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5607 do \
5608 { \
5609 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5610 if (rcStrictTmp == VINF_SUCCESS) \
5611 { /* likely */ } \
5612 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5613 { \
5614 Assert((a_pVCpu)->hm.s.Event.fPending); \
5615 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5616 return VINF_SUCCESS; \
5617 } \
5618 else \
5619 { \
5620 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5621 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5622 } \
5623 } while (0)
5624
5625/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5626# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5627 do \
5628 { \
5629 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5630 (a_pGCPtrEffAddr)); \
5631 if (rcStrictTmp == VINF_SUCCESS) \
5632 { /* likely */ } \
5633 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5634 { \
5635 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5636 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5637 NOREF(uXcptTmp); \
5638 return VINF_SUCCESS; \
5639 } \
5640 else \
5641 { \
5642 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5643 return rcStrictTmp; \
5644 } \
5645 } while (0)
5646#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5647
5648
5649/**
5650 * Advances the guest RIP by the specified number of bytes.
5651 *
5652 * @param pVCpu The cross context virtual CPU structure.
5653 * @param cbInstr Number of bytes to advance the RIP by.
5654 *
5655 * @remarks No-long-jump zone!!!
5656 */
5657DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5658{
5659 /* Advance the RIP. */
5660 pVCpu->cpum.GstCtx.rip += cbInstr;
5661 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5662
5663 /* Update interrupt inhibition. */
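    /* The STI/MOV SS shadow only covers the instruction at the recorded inhibition PC, so
       once RIP has moved past it the force-flag can be cleared. */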
5664 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5665 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5666 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5667}
5668
5669
5670/**
5671 * Advances the guest RIP after reading it from the VMCS.
5672 *
5673 * @returns VBox status code, no informational status codes.
5674 * @param pVCpu The cross context virtual CPU structure.
5675 * @param pVmxTransient The VMX-transient structure.
5676 *
5677 * @remarks No-long-jump zone!!!
5678 */
5679static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5680{
5681 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5682 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5683 AssertRCReturn(rc, rc);
5684
5685 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5686 return VINF_SUCCESS;
5687}
5688
5689
5690/**
5691 * Handles a condition that occurred while delivering an event through the guest or
5692 * nested-guest IDT.
5693 *
5694 * @returns Strict VBox status code (i.e. informational status codes too).
5695 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5696 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5697 * to continue execution of the guest which will deliver the \#DF.
5698 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5699 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5700 *
5701 * @param pVCpu The cross context virtual CPU structure.
5702 * @param pVmxTransient The VMX-transient structure.
5703 *
5704 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5705 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5706 * is due to an EPT violation, PML full or SPP-related event.
5707 *
5708 * @remarks No-long-jump zone!!!
5709 */
5710static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5711{
5712 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5713 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5714 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5715 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5716 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5717 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5718
5719 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5720 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5721 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5722 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5723 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5724 {
5725 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5726 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5727
5728 /*
5729 * If the event was a software interrupt (generated with INT n) or a software exception
5730 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5731 * can handle the VM-exit and continue guest execution which will re-execute the
5732 * instruction rather than re-injecting the exception, as that can cause premature
5733 * trips to ring-3 before injection and involve TRPM which currently has no way of
5734 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5735 * the problem).
5736 */
5737 IEMXCPTRAISE enmRaise;
5738 IEMXCPTRAISEINFO fRaiseInfo;
5739 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5740 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5741 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5742 {
5743 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5744 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5745 }
5746 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5747 {
5748 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5749 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5750 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5751
5752 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5753 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5754
5755 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
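            /* For example, a #PF raised while delivering an earlier #PF comes back as
               IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF set; see the
               IEMXCPTRAISE_DOUBLE_FAULT case below. */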
5756
5757 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5758 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5759 {
5760 pVmxTransient->fVectoringPF = true;
5761 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5762 }
5763 }
5764 else
5765 {
5766 /*
5767 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5768 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5769 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5770 */
5771 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5772 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5773 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5774 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5775 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5776 }
5777
5778 /*
5779 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5780 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5781 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5782 * subsequent VM-entry would fail, see @bugref{7445}.
5783 *
5784 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5785 */
5786 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5787 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5788 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5789 && CPUMIsGuestNmiBlocking(pVCpu))
5790 {
5791 CPUMSetGuestNmiBlocking(pVCpu, false);
5792 }
5793
5794 switch (enmRaise)
5795 {
5796 case IEMXCPTRAISE_CURRENT_XCPT:
5797 {
5798 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5799 Assert(rcStrict == VINF_SUCCESS);
5800 break;
5801 }
5802
5803 case IEMXCPTRAISE_PREV_EVENT:
5804 {
5805 uint32_t u32ErrCode;
5806 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5807 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5808 else
5809 u32ErrCode = 0;
5810
5811 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5812 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5813 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5814 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5815
5816 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5817 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5818 Assert(rcStrict == VINF_SUCCESS);
5819 break;
5820 }
5821
5822 case IEMXCPTRAISE_REEXEC_INSTR:
5823 Assert(rcStrict == VINF_SUCCESS);
5824 break;
5825
5826 case IEMXCPTRAISE_DOUBLE_FAULT:
5827 {
5828 /*
5829                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5830                 * second #PF as a guest #PF (and not a shadow #PF) that needs to be converted into a #DF.
5831 */
5832 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5833 {
5834 pVmxTransient->fVectoringDoublePF = true;
5835 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5836 pVCpu->cpum.GstCtx.cr2));
5837 rcStrict = VINF_SUCCESS;
5838 }
5839 else
5840 {
5841 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5842 vmxHCSetPendingXcptDF(pVCpu);
5843 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5844 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5845 rcStrict = VINF_HM_DOUBLE_FAULT;
5846 }
5847 break;
5848 }
5849
5850 case IEMXCPTRAISE_TRIPLE_FAULT:
5851 {
5852 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5853 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5854 rcStrict = VINF_EM_RESET;
5855 break;
5856 }
5857
5858 case IEMXCPTRAISE_CPU_HANG:
5859 {
5860 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5861 rcStrict = VERR_EM_GUEST_CPU_HANG;
5862 break;
5863 }
5864
5865 default:
5866 {
5867 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5868 rcStrict = VERR_VMX_IPE_2;
5869 break;
5870 }
5871 }
5872 }
5873 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5874 && !CPUMIsGuestNmiBlocking(pVCpu))
5875 {
5876 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5877 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5878 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5879 {
5880 /*
5881             * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5882 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5883 * that virtual NMIs remain blocked until the IRET execution is completed.
5884 *
5885 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5886 */
5887 CPUMSetGuestNmiBlocking(pVCpu, true);
5888 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5889 }
5890 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5891 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5892 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5893 {
5894 /*
5895 * Execution of IRET caused an EPT violation, page-modification log-full event or
5896 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5897 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5898 * that virtual NMIs remain blocked until the IRET execution is completed.
5899 *
5900 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5901 */
5902 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5903 {
5904 CPUMSetGuestNmiBlocking(pVCpu, true);
5905 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5906 }
5907 }
5908 }
5909
5910 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5911 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5912 return rcStrict;
5913}
5914
5915
5916#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5917/**
5918 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
5919 * guest attempting to execute a VMX instruction.
5920 *
5921 * @returns Strict VBox status code (i.e. informational status codes too).
5922 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5923 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5924 *
5925 * @param pVCpu The cross context virtual CPU structure.
5926 * @param uExitReason The VM-exit reason.
5927 *
5928 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5929 * @remarks No-long-jump zone!!!
5930 */
5931static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5932{
5933 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5934 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5935
5936 /*
5937 * The physical CPU would have already checked the CPU mode/code segment.
5938 * We shall just assert here for paranoia.
5939 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5940 */
5941 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5942 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5943 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5944
5945 if (uExitReason == VMX_EXIT_VMXON)
5946 {
5947 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5948
5949 /*
5950 * We check CR4.VMXE because it is required to be always set while in VMX operation
5951 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5952 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5953 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5954 */
5955 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5956 {
5957 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5958 vmxHCSetPendingXcptUD(pVCpu);
5959 return VINF_HM_PENDING_XCPT;
5960 }
5961 }
5962 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5963 {
5964 /*
5965 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5966         * (other than VMXON), so we need to raise a #UD.
5967 */
5968 Log4Func(("Not in VMX root mode -> #UD\n"));
5969 vmxHCSetPendingXcptUD(pVCpu);
5970 return VINF_HM_PENDING_XCPT;
5971 }
5972
5973 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5974 return VINF_SUCCESS;
5975}
5976
5977
5978/**
5979 * Decodes the memory operand of an instruction that caused a VM-exit.
5980 *
5981 * The Exit qualification field provides the displacement field for memory
5982 * operand instructions, if any.
5983 *
5984 * @returns Strict VBox status code (i.e. informational status codes too).
5985 * @retval VINF_SUCCESS if the operand was successfully decoded.
5986 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5987 * operand.
5988 * @param pVCpu The cross context virtual CPU structure.
5989 * @param uExitInstrInfo The VM-exit instruction information field.
5990 * @param   GCPtrDisp       The instruction displacement field, if any. For
5991 *                          RIP-relative addressing pass RIP + displacement here.
5992 * @param   enmMemAccess    The memory operand's access type (read or write).
5993 * @param pGCPtrMem Where to store the effective destination memory address.
5994 *
5995 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
5996 * virtual-8086 mode hence skips those checks while verifying if the
5997 * segment is valid.
5998 */
5999static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6000 PRTGCPTR pGCPtrMem)
6001{
6002 Assert(pGCPtrMem);
6003 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6004 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6005 | CPUMCTX_EXTRN_CR0);
6006
6007 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6008 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6009 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6010
6011 VMXEXITINSTRINFO ExitInstrInfo;
6012 ExitInstrInfo.u = uExitInstrInfo;
6013 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6014 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6015 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6016 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6017 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6018 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6019 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6020 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6021 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6022
6023 /*
6024 * Validate instruction information.
6025     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6026 */
6027 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6028 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6029 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6030 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6031 AssertLogRelMsgReturn(fIsMemOperand,
6032 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6033
6034 /*
6035 * Compute the complete effective address.
6036 *
6037 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6038 * See AMD spec. 4.5.2 "Segment Registers".
6039 */
6040 RTGCPTR GCPtrMem = GCPtrDisp;
6041 if (fBaseRegValid)
6042 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6043 if (fIdxRegValid)
6044 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6045
6046 RTGCPTR const GCPtrOff = GCPtrMem;
6047 if ( !fIsLongMode
6048 || iSegReg >= X86_SREG_FS)
6049 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6050 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
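    /* For instance, a hypothetical VMPTRLD [rbx+rcx*4+0x10] executed in 64-bit mode arrives here
       with GCPtrMem = 0x10 + rbx + (rcx << 2), the DS base being ignored in long mode, and is
       then masked with the 64-bit address-size mask (all ones, i.e. a no-op). */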
6051
6052 /*
6053 * Validate effective address.
6054 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6055 */
6056 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6057 Assert(cbAccess > 0);
6058 if (fIsLongMode)
6059 {
6060 if (X86_IS_CANONICAL(GCPtrMem))
6061 {
6062 *pGCPtrMem = GCPtrMem;
6063 return VINF_SUCCESS;
6064 }
6065
6066 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6067 * "Data Limit Checks in 64-bit Mode". */
6068 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6069 vmxHCSetPendingXcptGP(pVCpu, 0);
6070 return VINF_HM_PENDING_XCPT;
6071 }
6072
6073 /*
6074 * This is a watered down version of iemMemApplySegment().
6075 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6076 * and segment CPL/DPL checks are skipped.
6077 */
6078 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6079 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6080 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6081
6082 /* Check if the segment is present and usable. */
6083 if ( pSel->Attr.n.u1Present
6084 && !pSel->Attr.n.u1Unusable)
6085 {
6086 Assert(pSel->Attr.n.u1DescType);
6087 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6088 {
6089 /* Check permissions for the data segment. */
6090 if ( enmMemAccess == VMXMEMACCESS_WRITE
6091 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6092 {
6093 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6094 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6095 return VINF_HM_PENDING_XCPT;
6096 }
6097
6098 /* Check limits if it's a normal data segment. */
6099 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6100 {
6101 if ( GCPtrFirst32 > pSel->u32Limit
6102 || GCPtrLast32 > pSel->u32Limit)
6103 {
6104 Log4Func(("Data segment limit exceeded. "
6105 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6106 GCPtrLast32, pSel->u32Limit));
6107 if (iSegReg == X86_SREG_SS)
6108 vmxHCSetPendingXcptSS(pVCpu, 0);
6109 else
6110 vmxHCSetPendingXcptGP(pVCpu, 0);
6111 return VINF_HM_PENDING_XCPT;
6112 }
6113 }
6114 else
6115 {
6116 /* Check limits if it's an expand-down data segment.
6117 Note! The upper boundary is defined by the B bit, not the G bit! */
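                /* E.g. an expand-down segment with u32Limit=0x0fff and D/B=1 permits only
                   offsets 0x1000 through 0xffffffff. */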
6118 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6119 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6120 {
6121 Log4Func(("Expand-down data segment limit exceeded. "
6122 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6123 GCPtrLast32, pSel->u32Limit));
6124 if (iSegReg == X86_SREG_SS)
6125 vmxHCSetPendingXcptSS(pVCpu, 0);
6126 else
6127 vmxHCSetPendingXcptGP(pVCpu, 0);
6128 return VINF_HM_PENDING_XCPT;
6129 }
6130 }
6131 }
6132 else
6133 {
6134 /* Check permissions for the code segment. */
6135 if ( enmMemAccess == VMXMEMACCESS_WRITE
6136 || ( enmMemAccess == VMXMEMACCESS_READ
6137 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6138 {
6139 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6140 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6141 vmxHCSetPendingXcptGP(pVCpu, 0);
6142 return VINF_HM_PENDING_XCPT;
6143 }
6144
6145 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6146 if ( GCPtrFirst32 > pSel->u32Limit
6147 || GCPtrLast32 > pSel->u32Limit)
6148 {
6149 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6150 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6151 if (iSegReg == X86_SREG_SS)
6152 vmxHCSetPendingXcptSS(pVCpu, 0);
6153 else
6154 vmxHCSetPendingXcptGP(pVCpu, 0);
6155 return VINF_HM_PENDING_XCPT;
6156 }
6157 }
6158 }
6159 else
6160 {
6161 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6162 vmxHCSetPendingXcptGP(pVCpu, 0);
6163 return VINF_HM_PENDING_XCPT;
6164 }
6165
6166 *pGCPtrMem = GCPtrMem;
6167 return VINF_SUCCESS;
6168}
6169#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6170
6171
6172/**
6173 * VM-exit helper for LMSW.
6174 */
6175static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6176{
6177 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6178 AssertRCReturn(rc, rc);
6179
6180 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6181 AssertMsg( rcStrict == VINF_SUCCESS
6182 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6183
6184 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6185 if (rcStrict == VINF_IEM_RAISED_XCPT)
6186 {
6187 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6188 rcStrict = VINF_SUCCESS;
6189 }
6190
6191 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6192 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6193 return rcStrict;
6194}
6195
6196
6197/**
6198 * VM-exit helper for CLTS.
6199 */
6200static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6201{
6202 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6203 AssertRCReturn(rc, rc);
6204
6205 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6206 AssertMsg( rcStrict == VINF_SUCCESS
6207 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6208
6209 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6210 if (rcStrict == VINF_IEM_RAISED_XCPT)
6211 {
6212 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6213 rcStrict = VINF_SUCCESS;
6214 }
6215
6216 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6217 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6218 return rcStrict;
6219}
6220
6221
6222/**
6223 * VM-exit helper for MOV from CRx (CRx read).
6224 */
6225static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6226{
6227 Assert(iCrReg < 16);
6228 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6229
6230 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6231 AssertRCReturn(rc, rc);
6232
6233 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6234 AssertMsg( rcStrict == VINF_SUCCESS
6235 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6236
6237 if (iGReg == X86_GREG_xSP)
6238 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6239 else
6240 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6241#ifdef VBOX_WITH_STATISTICS
6242 switch (iCrReg)
6243 {
6244 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6245 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6246 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6247 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6248 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6249 }
6250#endif
6251 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6252 return rcStrict;
6253}
6254
6255
6256/**
6257 * VM-exit helper for MOV to CRx (CRx write).
6258 */
6259static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6260{
6261 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6262
6263 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6264 AssertMsg( rcStrict == VINF_SUCCESS
6265 || rcStrict == VINF_IEM_RAISED_XCPT
6266 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6267
6268 switch (iCrReg)
6269 {
6270 case 0:
6271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6272 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6273 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6274 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6275 break;
6276
6277 case 2:
6278 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6279            /* Nothing to do here, CR2 is not part of the VMCS. */
6280 break;
6281
6282 case 3:
6283 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6284 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6285 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6286 break;
6287
6288 case 4:
6289 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6290 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6291#ifndef IN_NEM_DARWIN
6292 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6293 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6294#else
6295 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6296#endif
6297 break;
6298
6299 case 8:
6300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6301 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6302 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6303 break;
6304
6305 default:
6306 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6307 break;
6308 }
6309
6310 if (rcStrict == VINF_IEM_RAISED_XCPT)
6311 {
6312 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6313 rcStrict = VINF_SUCCESS;
6314 }
6315 return rcStrict;
6316}
6317
6318
6319/**
6320 * VM-exit exception handler for \#PF (Page-fault exception).
6321 *
6322 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6323 */
6324static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6325{
6326 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6327 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6328
6329#ifndef IN_NEM_DARWIN
6330 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6331 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6332 { /* likely */ }
6333 else
6334#endif
6335 {
6336#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6337 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6338#endif
6339 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6340 if (!pVmxTransient->fVectoringDoublePF)
6341 {
6342 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6343 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6344 }
6345 else
6346 {
6347 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6348 Assert(!pVmxTransient->fIsNestedGuest);
6349 vmxHCSetPendingXcptDF(pVCpu);
6350 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6351 }
6352 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6353 return VINF_SUCCESS;
6354 }
6355
6356 Assert(!pVmxTransient->fIsNestedGuest);
6357
6358    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
6359 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6360 if (pVmxTransient->fVectoringPF)
6361 {
6362 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6363 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6364 }
6365
6366 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6367 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6368 AssertRCReturn(rc, rc);
6369
6370 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6371 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6372
6373 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6374 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6375
6376 Log4Func(("#PF: rc=%Rrc\n", rc));
6377 if (rc == VINF_SUCCESS)
6378 {
6379 /*
6380         * This is typically a shadow page table sync or an MMIO instruction. But we may have
6381 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6382 */
6383 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6384 TRPMResetTrap(pVCpu);
6385 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6386 return rc;
6387 }
6388
6389 if (rc == VINF_EM_RAW_GUEST_TRAP)
6390 {
6391 if (!pVmxTransient->fVectoringDoublePF)
6392 {
6393 /* It's a guest page fault and needs to be reflected to the guest. */
6394 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6395 TRPMResetTrap(pVCpu);
6396 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6397 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6398 uGstErrorCode, pVmxTransient->uExitQual);
6399 }
6400 else
6401 {
6402 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6403 TRPMResetTrap(pVCpu);
6404 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6405 vmxHCSetPendingXcptDF(pVCpu);
6406 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6407 }
6408
6409 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6410 return VINF_SUCCESS;
6411 }
6412
6413 TRPMResetTrap(pVCpu);
6414 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6415 return rc;
6416}
6417
6418
6419/**
6420 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6421 *
6422 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6423 */
6424static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6425{
6426 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6427 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6428
6429 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6430 AssertRCReturn(rc, rc);
6431
6432 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6433 {
6434 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
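        /* With CR0.NE clear the guest expects legacy (MS-DOS compatible) FPU error reporting:
           the FERR# pin is asserted and routed through the PIC as IRQ 13 instead of raising #MF. */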
6435 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6436
6437 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6438         * provides VM-exit instruction length. If this causes a problem later,
6439 * disassemble the instruction like it's done on AMD-V. */
6440 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6441 AssertRCReturn(rc2, rc2);
6442 return rc;
6443 }
6444
6445 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6446 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6447 return VINF_SUCCESS;
6448}
6449
6450
6451/**
6452 * VM-exit exception handler for \#BP (Breakpoint exception).
6453 *
6454 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6455 */
6456static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6457{
6458 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6459 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6460
6461 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6462 AssertRCReturn(rc, rc);
6463
6464 VBOXSTRICTRC rcStrict;
6465 if (!pVmxTransient->fIsNestedGuest)
6466 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6467 else
6468 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6469
6470 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6471 {
6472 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6473 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6474 rcStrict = VINF_SUCCESS;
6475 }
6476
6477 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6478 return rcStrict;
6479}
6480
6481
6482/**
6483 * VM-exit exception handler for \#AC (Alignment-check exception).
6484 *
6485 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6486 */
6487static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6488{
6489 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6490
6491 /*
6492 * Detect #ACs caused by host having enabled split-lock detection.
6493 * Emulate such instructions.
6494 */
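    /* A split-lock #AC is raised by the host CPU (when split-lock detection is enabled) for a
       LOCK-prefixed access that crosses a cache-line boundary; unlike the legacy 486-style
       alignment check it does not require CR0.AM, EFLAGS.AC or CPL 3. */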
6495 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6496 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6497 AssertRCReturn(rc, rc);
6498 /** @todo detect split lock in cpu feature? */
6499 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6500 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6501 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6502 || CPUMGetGuestCPL(pVCpu) != 3
6503           /* 3. When EFLAGS.AC is clear this can only be a split-lock case. */
6504 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6505 {
6506 /*
6507 * Check for debug/trace events and import state accordingly.
6508 */
6509 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6510 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6511 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6512#ifndef IN_NEM_DARWIN
6513 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6514#endif
6515 )
6516 {
6517 if (pVM->cCpus == 1)
6518 {
6519#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6520 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6521#else
6522 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6523#endif
6524 AssertRCReturn(rc, rc);
6525 }
6526 }
6527 else
6528 {
6529 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6530 AssertRCReturn(rc, rc);
6531
6532 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6533
6534 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6535 {
6536 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6537 if (rcStrict != VINF_SUCCESS)
6538 return rcStrict;
6539 }
6540 }
6541
6542 /*
6543 * Emulate the instruction.
6544 *
6545 * We have to ignore the LOCK prefix here as we must not retrigger the
6546 * detection on the host. This isn't all that satisfactory, though...
6547 */
6548 if (pVM->cCpus == 1)
6549 {
6550 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6551 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6552
6553 /** @todo For SMP configs we should do a rendezvous here. */
6554 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6555 if (rcStrict == VINF_SUCCESS)
6556#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6557 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6558 HM_CHANGED_GUEST_RIP
6559 | HM_CHANGED_GUEST_RFLAGS
6560 | HM_CHANGED_GUEST_GPRS_MASK
6561 | HM_CHANGED_GUEST_CS
6562 | HM_CHANGED_GUEST_SS);
6563#else
6564 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6565#endif
6566 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6567 {
6568 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6569 rcStrict = VINF_SUCCESS;
6570 }
6571 return rcStrict;
6572 }
6573 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6574 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6575 return VINF_EM_EMULATE_SPLIT_LOCK;
6576 }
6577
6578 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6579 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6580 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6581
6582 /* Re-inject it. We'll detect any nesting before getting here. */
6583 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6584 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6585 return VINF_SUCCESS;
6586}
6587
6588
6589/**
6590 * VM-exit exception handler for \#DB (Debug exception).
6591 *
6592 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6593 */
6594static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6595{
6596 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6597 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6598
6599 /*
6600     * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
6601 */
6602 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6603
6604    /* Refer to Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
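    /* Exit qualification bits 0-3 mirror DR6.B0-B3, bit 13 mirrors DR6.BD and bit 14 mirrors DR6.BS. */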
6605 uint64_t const uDR6 = X86_DR6_INIT_VAL
6606 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6607 | X86_DR6_BD | X86_DR6_BS));
6608
6609 int rc;
6610 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6611 if (!pVmxTransient->fIsNestedGuest)
6612 {
6613 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6614
6615 /*
6616 * Prevents stepping twice over the same instruction when the guest is stepping using
6617 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6618 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6619 */
6620 if ( rc == VINF_EM_DBG_STEPPED
6621 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6622 {
6623 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6624 rc = VINF_EM_RAW_GUEST_TRAP;
6625 }
6626 }
6627 else
6628 rc = VINF_EM_RAW_GUEST_TRAP;
6629 Log6Func(("rc=%Rrc\n", rc));
6630 if (rc == VINF_EM_RAW_GUEST_TRAP)
6631 {
6632 /*
6633 * The exception was for the guest. Update DR6, DR7.GD and
6634 * IA32_DEBUGCTL.LBR before forwarding it.
6635 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6636 */
6637#ifndef IN_NEM_DARWIN
6638 VMMRZCallRing3Disable(pVCpu);
6639 HM_DISABLE_PREEMPT(pVCpu);
6640
6641 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6642 pCtx->dr[6] |= uDR6;
6643 if (CPUMIsGuestDebugStateActive(pVCpu))
6644 ASMSetDR6(pCtx->dr[6]);
6645
6646 HM_RESTORE_PREEMPT();
6647 VMMRZCallRing3Enable(pVCpu);
6648#else
6649 /** @todo */
6650#endif
6651
6652 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6653 AssertRCReturn(rc, rc);
6654
6655 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6656 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6657
6658 /* Paranoia. */
6659 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6660 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6661
6662 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6663 AssertRC(rc);
6664
6665 /*
6666 * Raise #DB in the guest.
6667 *
6668 * It is important to reflect exactly what the VM-exit gave us (preserving the
6669 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6670 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6671 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6672 *
6673         * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as
6674         * part of the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6675 */
6676 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6677 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6678 return VINF_SUCCESS;
6679 }
6680
6681 /*
6682     * Not a guest trap, so it must be a hypervisor-related debug event.
6683 * Update DR6 in case someone is interested in it.
6684 */
6685 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6686 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6687 CPUMSetHyperDR6(pVCpu, uDR6);
6688
6689 return rc;
6690}
6691
6692
6693/**
6694 * Hacks its way around the lovely mesa driver's backdoor accesses.
6695 *
6696 * @sa hmR0SvmHandleMesaDrvGp.
6697 */
6698static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6699{
6700 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6701 RT_NOREF(pCtx);
6702
6703 /* For now we'll just skip the instruction. */
6704 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6705}
6706
6707
6708/**
6709 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6710 * backdoor logging w/o checking what it is running inside.
6711 *
6712 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6713 * backdoor port and magic numbers loaded in registers.
6714 *
6715 * @returns true if it is, false if it isn't.
6716 * @sa hmR0SvmIsMesaDrvGp.
6717 */
6718DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6719{
6720 /* 0xed: IN eAX,dx */
6721 uint8_t abInstr[1];
6722 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6723 return false;
6724
6725 /* Check that it is #GP(0). */
6726 if (pVmxTransient->uExitIntErrorCode != 0)
6727 return false;
6728
6729 /* Check magic and port. */
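    /* 0x564d5868 is the VMware backdoor magic ('VMXh') and 0x5658 ('VX') the backdoor I/O port;
       the Mesa SVGA/vmwgfx driver uses these for its logging calls. */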
6730 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6731 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6732 if (pCtx->rax != UINT32_C(0x564d5868))
6733 return false;
6734 if (pCtx->dx != UINT32_C(0x5658))
6735 return false;
6736
6737 /* Flat ring-3 CS. */
6738 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6739 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6740 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6741 if (pCtx->cs.Attr.n.u2Dpl != 3)
6742 return false;
6743 if (pCtx->cs.u64Base != 0)
6744 return false;
6745
6746 /* Check opcode. */
6747 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6748 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6749 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6750 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6751 if (RT_FAILURE(rc))
6752 return false;
6753 if (abInstr[0] != 0xed)
6754 return false;
6755
6756 return true;
6757}
6758
6759
6760/**
6761 * VM-exit exception handler for \#GP (General-protection exception).
6762 *
6763 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6764 */
6765static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6766{
6767 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6768 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6769
6770 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6771 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6772#ifndef IN_NEM_DARWIN
6773 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6774 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6775 { /* likely */ }
6776 else
6777#endif
6778 {
6779#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6780# ifndef IN_NEM_DARWIN
6781 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6782# else
6783 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6784# endif
6785#endif
6786 /*
6787 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6788 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6789 */
6790 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6791 AssertRCReturn(rc, rc);
6792 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6793 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6794
6795 if ( pVmxTransient->fIsNestedGuest
6796 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6797 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6798 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6799 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6800 else
6801 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6802 return rc;
6803 }
6804
6805#ifndef IN_NEM_DARWIN
6806 Assert(CPUMIsGuestInRealModeEx(pCtx));
6807 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6808 Assert(!pVmxTransient->fIsNestedGuest);
6809
6810 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6811 AssertRCReturn(rc, rc);
6812
6813 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6814 if (rcStrict == VINF_SUCCESS)
6815 {
6816 if (!CPUMIsGuestInRealModeEx(pCtx))
6817 {
6818 /*
6819         * The guest is no longer in real-mode; check if we can continue executing the
6820 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6821 */
6822 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6823 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6824 {
6825 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6826 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6827 }
6828 else
6829 {
6830 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6831 rcStrict = VINF_EM_RESCHEDULE;
6832 }
6833 }
6834 else
6835 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6836 }
6837 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6838 {
6839 rcStrict = VINF_SUCCESS;
6840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6841 }
6842 return VBOXSTRICTRC_VAL(rcStrict);
6843#endif
6844}
6845
6846
6847/**
6848 * VM-exit exception handler wrapper for all other exceptions that are not handled
6849 * by a specific handler.
6850 *
6851 * This simply re-injects the exception back into the VM without any special
6852 * processing.
6853 *
6854 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6855 */
6856static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6857{
6858 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6859
6860#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6861# ifndef IN_NEM_DARWIN
6862 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6863 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6864 ("uVector=%#x u32XcptBitmap=%#X32\n",
6865 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6866 NOREF(pVmcsInfo);
6867# endif
6868#endif
6869
6870 /*
6871 * Re-inject the exception into the guest. This cannot be a double-fault condition which
6872 * would have been handled while checking exits due to event delivery.
6873 */
6874 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6875
6876#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6877 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6878 AssertRCReturn(rc, rc);
6879 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6880#endif
6881
6882#ifdef VBOX_WITH_STATISTICS
6883 switch (uVector)
6884 {
6885 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6886 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6887 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6888 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6889 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6890 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6891 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6892 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6893 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6894 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6895 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6896 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6897 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6898 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6899 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6900 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6901 default:
6902 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6903 break;
6904 }
6905#endif
6906
6907 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6908 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6909 NOREF(uVector);
6910
6911 /* Re-inject the original exception into the guest. */
6912 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6913 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6914 return VINF_SUCCESS;
6915}
6916
6917
6918/**
6919 * VM-exit exception handler for all exceptions (except NMIs!).
6920 *
6921 * @remarks This may be called for both guests and nested-guests. Take care to not
6922 * make assumptions and avoid doing anything that is not relevant when
6923 * executing a nested-guest (e.g., Mesa driver hacks).
6924 */
6925static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6926{
6927 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6928
6929 /*
6930 * If this VM-exit occurred while delivering an event through the guest IDT, take
6931 * action based on the return code and additional hints (e.g. for page-faults)
6932 * that will be updated in the VMX transient structure.
6933 */
6934 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6935 if (rcStrict == VINF_SUCCESS)
6936 {
6937 /*
6938 * If an exception caused a VM-exit due to delivery of an event, the original
6939 * event may have to be re-injected into the guest. We shall reinject it and
6940 * continue guest execution. However, page-fault is a complicated case and
6941 * needs additional processing done in vmxHCExitXcptPF().
6942 */
6943 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6944 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6945 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6946 || uVector == X86_XCPT_PF)
6947 {
6948 switch (uVector)
6949 {
6950 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6951 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6952 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6953 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6954 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6955 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6956 default:
6957 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6958 }
6959 }
6960 /* else: inject pending event before resuming guest execution. */
6961 }
6962 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6963 {
6964 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6965 rcStrict = VINF_SUCCESS;
6966 }
6967
6968 return rcStrict;
6969}
6970/** @} */
6971
6972
6973/** @name VM-exit handlers.
6974 * @{
6975 */
6976/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6977/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6978/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6979
6980/**
6981 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6982 */
6983HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6984{
6985 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6986 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6987
6988#ifndef IN_NEM_DARWIN
6989 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6990 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6991 return VINF_SUCCESS;
6992 return VINF_EM_RAW_INTERRUPT;
6993#else
6994 return VINF_SUCCESS;
6995#endif
6996}
6997
6998
6999/**
7000 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7001 * VM-exit.
7002 */
7003HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7004{
7005 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7006 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7007
7008 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7009
7010 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7011 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7012 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7013
7014 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7015 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7016 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7017 NOREF(pVmcsInfo);
7018
7019 VBOXSTRICTRC rcStrict;
7020 switch (uExitIntType)
7021 {
7022#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7023 /*
7024 * Host physical NMIs:
7025 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7026 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7027 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7028 *
7029 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7030 * See Intel spec. 27.5.5 "Updating Non-Register State".
7031 */
7032 case VMX_EXIT_INT_INFO_TYPE_NMI:
7033 {
7034 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7035 break;
7036 }
7037#endif
7038
7039 /*
7040 * Privileged software exceptions (#DB from ICEBP),
7041 * Software exceptions (#BP and #OF),
7042 * Hardware exceptions:
7043 * Process the required exceptions and resume guest execution if possible.
7044 */
7045 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7046 Assert(uVector == X86_XCPT_DB);
7047 RT_FALL_THRU();
7048 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7049 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7050 RT_FALL_THRU();
7051 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7052 {
7053 NOREF(uVector);
7054 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7055 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7056 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7057 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7058
7059 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7060 break;
7061 }
7062
7063 default:
7064 {
7065 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7066 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7067 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7068 break;
7069 }
7070 }
7071
7072 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7073 return rcStrict;
7074}
7075
7076
7077/**
7078 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7079 */
7080HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7081{
7082 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7083
7084    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7085 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7086 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7087
7088 /* Evaluate and deliver pending events and resume guest execution. */
7089 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7090 return VINF_SUCCESS;
7091}
7092
7093
7094/**
7095 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7096 */
7097HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7098{
7099 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7100
7101 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7102 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7103 {
7104 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7105 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7106 }
7107
7108 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7109
7110 /*
7111 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7112 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7113 */
7114 uint32_t fIntrState;
7115 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7116 AssertRC(rc);
7117 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7118 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7119 {
7120 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7121 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7122
7123 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7124 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7125 AssertRC(rc);
7126 }
7127
7128    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7129 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7130
7131 /* Evaluate and deliver pending events and resume guest execution. */
7132 return VINF_SUCCESS;
7133}
7134
7135
7136/**
7137 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7138 */
7139HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7140{
7141 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7142 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7143}
7144
7145
7146/**
7147 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7148 */
7149HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7150{
7151 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7152 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7153}
7154
7155
7156/**
7157 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7158 */
7159HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7160{
7161 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7162
7163 /*
7164 * Get the state we need and update the exit history entry.
7165 */
7166 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7167 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7168
7169 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7170 AssertRCReturn(rc, rc);
7171
7172 VBOXSTRICTRC rcStrict;
7173 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7174 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7175 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
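        /* A non-NULL exit record means this CS:RIP has been exiting frequently enough to be worth
           probing; in that case the full guest state is imported and the exit is handed to
           EMHistoryExec in the else-branch below instead of emulating this single CPUID. */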
7176 if (!pExitRec)
7177 {
7178 /*
7179 * Regular CPUID instruction execution.
7180 */
7181 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7182 if (rcStrict == VINF_SUCCESS)
7183 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7184 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7185 {
7186 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7187 rcStrict = VINF_SUCCESS;
7188 }
7189 }
7190 else
7191 {
7192 /*
7193 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7194 */
7195 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7196 AssertRCReturn(rc2, rc2);
7197
7198 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7199 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7200
7201 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7202 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7203
7204 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7205 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7206 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7207 }
7208 return rcStrict;
7209}
7210
7211
7212/**
7213 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7214 */
7215HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7216{
7217 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7218
7219 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7220 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7221 AssertRCReturn(rc, rc);
7222
7223 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7224 return VINF_EM_RAW_EMULATE_INSTR;
7225
7226 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7227 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7228}
7229
7230
7231/**
7232 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7233 */
7234HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7235{
7236 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7237
7238 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7239 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7240 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7241 AssertRCReturn(rc, rc);
7242
7243 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7244 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7245 {
7246 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7247 we must reset offsetting on VM-entry. See @bugref{6634}. */
7248 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7249 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7250 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7251 }
7252 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7253 {
7254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7255 rcStrict = VINF_SUCCESS;
7256 }
7257 return rcStrict;
7258}
7259
7260
7261/**
7262 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7263 */
7264HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7265{
7266 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7267
7268 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7269 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7270 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7271 AssertRCReturn(rc, rc);
7272
7273 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7274 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7275 {
7276 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7277 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7278 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7279 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7280 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7281 }
7282 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7283 {
7284 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7285 rcStrict = VINF_SUCCESS;
7286 }
7287 return rcStrict;
7288}
7289
7290
7291/**
7292 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7293 */
7294HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7295{
7296 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7297
7298 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7299 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7300 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7301 AssertRCReturn(rc, rc);
7302
7303 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7304 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7305 if (RT_LIKELY(rc == VINF_SUCCESS))
7306 {
7307 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7308 Assert(pVmxTransient->cbExitInstr == 2);
7309 }
7310 else
7311 {
7312 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7313 rc = VERR_EM_INTERPRETER;
7314 }
7315 return rc;
7316}
7317
7318
7319/**
7320 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7321 */
7322HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7323{
7324 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7325
7326 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7327 if (EMAreHypercallInstructionsEnabled(pVCpu))
7328 {
7329 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7330 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7331 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7332 AssertRCReturn(rc, rc);
7333
7334 /* Perform the hypercall. */
7335 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7336 if (rcStrict == VINF_SUCCESS)
7337 {
7338 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7339 AssertRCReturn(rc, rc);
7340 }
7341 else
7342 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7343 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7344 || RT_FAILURE(rcStrict));
7345
7346 /* If the hypercall changes anything other than guest's general-purpose registers,
7347 we would need to reload the guest changed bits here before VM-entry. */
7348 }
7349 else
7350 Log4Func(("Hypercalls not enabled\n"));
7351
7352 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7353 if (RT_FAILURE(rcStrict))
7354 {
7355 vmxHCSetPendingXcptUD(pVCpu);
7356 rcStrict = VINF_SUCCESS;
7357 }
7358
7359 return rcStrict;
7360}
7361
7362
7363/**
7364 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7365 */
7366HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7367{
7368 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7369#ifndef IN_NEM_DARWIN
7370 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7371#endif
7372
7373 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7374 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7375 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7376 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7377 AssertRCReturn(rc, rc);
7378
7379 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7380
7381 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7382 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7383 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7384 {
7385 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7386 rcStrict = VINF_SUCCESS;
7387 }
7388 else
7389 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7390 VBOXSTRICTRC_VAL(rcStrict)));
7391 return rcStrict;
7392}
7393
7394
7395/**
7396 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7397 */
7398HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7399{
7400 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7401
7402 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7403 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7404 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7405 AssertRCReturn(rc, rc);
7406
7407 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7408 if (rcStrict == VINF_SUCCESS)
7409 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7410 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7411 {
7412 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7413 rcStrict = VINF_SUCCESS;
7414 }
7415
7416 return rcStrict;
7417}
7418
7419
7420/**
7421 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7422 */
7423HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7424{
7425 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7426
7427 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7428 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7429 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7430 AssertRCReturn(rc, rc);
7431
7432 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7433 if (RT_SUCCESS(rcStrict))
7434 {
7435 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7436 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7437 rcStrict = VINF_SUCCESS;
7438 }
7439
7440 return rcStrict;
7441}
7442
7443
7444/**
7445 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7446 * VM-exit.
7447 */
7448HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7449{
7450 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7451 return VINF_EM_RESET;
7452}
7453
7454
7455/**
7456 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7457 */
7458HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7459{
7460 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7461
7462 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7463 AssertRCReturn(rc, rc);
7464
7465 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7466 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7467 rc = VINF_SUCCESS;
7468 else
7469 rc = VINF_EM_HALT;
7470
7471 if (rc != VINF_SUCCESS)
7472 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7473 return rc;
7474}
7475
7476
7477/**
7478 * VM-exit handler for instructions that result in a \#UD exception delivered to
7479 * the guest.
7480 */
7481HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7482{
7483 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7484 vmxHCSetPendingXcptUD(pVCpu);
7485 return VINF_SUCCESS;
7486}
7487
7488
7489/**
7490 * VM-exit handler for expiry of the VMX-preemption timer.
7491 */
7492HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7493{
7494 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7495
7496 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7497 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7498    Log12(("vmxHCExitPreemptTimer:\n"));
7499
7500 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7501 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7502 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7503 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7504 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7505}
7506
7507
7508/**
7509 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7510 */
7511HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7512{
7513 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7514
7515 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7516 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7517 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7518 AssertRCReturn(rc, rc);
7519
7520 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7521 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7522 : HM_CHANGED_RAISED_XCPT_MASK);
7523
7524#ifndef IN_NEM_DARWIN
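        /* Re-evaluate whether ring-0 must load/save the guest XCR0 around guest execution: this is
           only needed when CR4.OSXSAVE is set and the guest XCR0 differs from the host value. If the
           requirement changed, update the cached flag and (presumably) re-select the VM-entry worker. */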
7525 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7526 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7527 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7528 {
7529 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7530 hmR0VmxUpdateStartVmFunction(pVCpu);
7531 }
7532#endif
7533
7534 return rcStrict;
7535}
7536
7537
7538/**
7539 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7540 */
7541HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7542{
7543 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7544
7545    /** @todo Enable the new code after finding a reliable guest test-case. */
7546#if 1
7547 return VERR_EM_INTERPRETER;
7548#else
7549 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7550 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7551 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7552 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7553 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7554 AssertRCReturn(rc, rc);
7555
7556 /* Paranoia. Ensure this has a memory operand. */
7557 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7558
7559 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7560 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7561 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7562 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7563
7564 RTGCPTR GCPtrDesc;
7565 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7566
7567 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7568 GCPtrDesc, uType);
7569 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7570 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7571 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7572 {
7573 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7574 rcStrict = VINF_SUCCESS;
7575 }
7576 return rcStrict;
7577#endif
7578}
7579
7580
7581/**
7582 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7583 * VM-exit.
7584 */
7585HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7586{
7587 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7588 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7589 AssertRCReturn(rc, rc);
7590
7591 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7592 if (RT_FAILURE(rc))
7593 return rc;
7594
7595 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7596 NOREF(uInvalidReason);
7597
7598#ifdef VBOX_STRICT
7599 uint32_t fIntrState;
7600 uint64_t u64Val;
7601 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7602 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7603 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7604
7605 Log4(("uInvalidReason %u\n", uInvalidReason));
7606 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7607 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7608 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7609
7610 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7611 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7612 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7613 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7614 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7615 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7616 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7617    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW            %#RX64\n", u64Val));
7618 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7619 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7620 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7621 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7622# ifndef IN_NEM_DARWIN
7623 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7624 {
7625 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7626 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7627 }
7628
7629 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7630# endif
7631#endif
7632
7633 return VERR_VMX_INVALID_GUEST_STATE;
7634}
7635
7636/**
7637 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7638 */
7639HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7640{
7641 /*
7642 * Cumulative notes of all recognized but unexpected VM-exits.
7643 *
7644 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7645 * nested-paging is used.
7646 *
7647     * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
7648 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7649 * this function (and thereby stop VM execution) for handling such instructions.
7650 *
7651 *
7652 * VMX_EXIT_INIT_SIGNAL:
7653 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7654     * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7655     * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
7656 *
7657     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7658 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7659 * See Intel spec. "23.8 Restrictions on VMX operation".
7660 *
7661 * VMX_EXIT_SIPI:
7662 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7663 * activity state is used. We don't make use of it as our guests don't have direct
7664 * access to the host local APIC.
7665 *
7666 * See Intel spec. 25.3 "Other Causes of VM-exits".
7667 *
7668 * VMX_EXIT_IO_SMI:
7669 * VMX_EXIT_SMI:
7670 * This can only happen if we support dual-monitor treatment of SMI, which can be
7671 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7672 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7673 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7674 *
7675 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7676 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7677 *
7678 * VMX_EXIT_ERR_MSR_LOAD:
7679     * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7680     * and typically indicate a bug in the hypervisor code. We thus cannot resume
7681     * execution.
7682 *
7683 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7684 *
7685 * VMX_EXIT_ERR_MACHINE_CHECK:
7686     * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7687     * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7688     * abort-class exception (#MC) is raised. We thus cannot assume a
7689 * reasonable chance of continuing any sort of execution and we bail.
7690 *
7691 * See Intel spec. 15.1 "Machine-check Architecture".
7692 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7693 *
7694 * VMX_EXIT_PML_FULL:
7695 * VMX_EXIT_VIRTUALIZED_EOI:
7696 * VMX_EXIT_APIC_WRITE:
7697 * We do not currently support any of these features and thus they are all unexpected
7698 * VM-exits.
7699 *
7700 * VMX_EXIT_GDTR_IDTR_ACCESS:
7701 * VMX_EXIT_LDTR_TR_ACCESS:
7702 * VMX_EXIT_RDRAND:
7703 * VMX_EXIT_RSM:
7704 * VMX_EXIT_VMFUNC:
7705 * VMX_EXIT_ENCLS:
7706 * VMX_EXIT_RDSEED:
7707 * VMX_EXIT_XSAVES:
7708 * VMX_EXIT_XRSTORS:
7709 * VMX_EXIT_UMWAIT:
7710 * VMX_EXIT_TPAUSE:
7711 * VMX_EXIT_LOADIWKEY:
7712 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7713     * instruction. Any VM-exit for these instructions indicates a hardware problem,
7714 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7715 *
7716 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7717 */
7718 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7719 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7720 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7721}
7722
7723
7724/**
7725 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7726 */
7727HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7728{
7729 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7730
7731 /** @todo Optimize this: We currently drag in the whole MSR state
7732 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7733 * MSRs required. That would require changes to IEM and possibly CPUM too.
7734     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7735 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7736 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7737 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
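        /* As noted in the WRMSR handler below, the FS and GS base MSRs are not part of the all-MSRs
           mask above, so reading them requires importing the full segment register state as well. */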
7738 switch (idMsr)
7739 {
7740 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7741 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7742 }
7743
7744 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7745 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7746 AssertRCReturn(rc, rc);
7747
7748 Log4Func(("ecx=%#RX32\n", idMsr));
7749
7750#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7751 Assert(!pVmxTransient->fIsNestedGuest);
7752 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7753 {
7754 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7755 && idMsr != MSR_K6_EFER)
7756 {
7757 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7758 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7759 }
7760 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7761 {
7762 Assert(pVmcsInfo->pvMsrBitmap);
7763 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7764 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7765 {
7766 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7767 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7768 }
7769 }
7770 }
7771#endif
7772
7773 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7774 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7775 if (rcStrict == VINF_SUCCESS)
7776 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7777 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7778 {
7779 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7780 rcStrict = VINF_SUCCESS;
7781 }
7782 else
7783 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7784 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7785
7786 return rcStrict;
7787}
7788
7789
7790/**
7791 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7792 */
7793HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7794{
7795 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7796
7797 /** @todo Optimize this: We currently drag in the whole MSR state
7798 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7799 * MSRs required. That would require changes to IEM and possibly CPUM too.
7800     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7801 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7802 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7803
7804 /*
7805 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7806 * Although we don't need to fetch the base as it will be overwritten shortly, while
7807     * Although we don't need to fetch the base (it will be overwritten shortly), when
7808     * loading the guest state we also load the entire segment register, including the limit
7809     * and attributes, and thus we need to import them here.
7810 switch (idMsr)
7811 {
7812 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7813 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7814 }
7815
7816 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7817 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7818 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7819 AssertRCReturn(rc, rc);
7820
7821 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7822
7823 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7824 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7825
7826 if (rcStrict == VINF_SUCCESS)
7827 {
7828 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7829
7830 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7831 if ( idMsr == MSR_IA32_APICBASE
7832 || ( idMsr >= MSR_IA32_X2APIC_START
7833 && idMsr <= MSR_IA32_X2APIC_END))
7834 {
7835 /*
7836 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7837 * When full APIC register virtualization is implemented we'll have to make
7838 * sure APIC state is saved from the VMCS before IEM changes it.
7839 */
7840 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7841 }
7842 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7843 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7844 else if (idMsr == MSR_K6_EFER)
7845 {
7846 /*
7847 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7848 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7849 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7850 */
7851 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7852 }
7853
7854 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7855 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7856 {
7857 switch (idMsr)
7858 {
7859 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7860 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7861 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7862 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7863 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7864 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7865 default:
7866 {
7867#ifndef IN_NEM_DARWIN
7868 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7869 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7870 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7871 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7872#else
7873 AssertMsgFailed(("TODO\n"));
7874#endif
7875 break;
7876 }
7877 }
7878 }
7879#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7880 else
7881 {
7882 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7883 switch (idMsr)
7884 {
7885 case MSR_IA32_SYSENTER_CS:
7886 case MSR_IA32_SYSENTER_EIP:
7887 case MSR_IA32_SYSENTER_ESP:
7888 case MSR_K8_FS_BASE:
7889 case MSR_K8_GS_BASE:
7890 {
7891 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7892 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7893 }
7894
7895 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7896 default:
7897 {
7898 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7899 {
7900 /* EFER MSR writes are always intercepted. */
7901 if (idMsr != MSR_K6_EFER)
7902 {
7903 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7904 idMsr));
7905 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7906 }
7907 }
7908
7909 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7910 {
7911 Assert(pVmcsInfo->pvMsrBitmap);
7912 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7913 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7914 {
7915 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7916 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7917 }
7918 }
7919 break;
7920 }
7921 }
7922 }
7923#endif /* VBOX_STRICT */
7924 }
7925 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7926 {
7927 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7928 rcStrict = VINF_SUCCESS;
7929 }
7930 else
7931 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7932 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7933
7934 return rcStrict;
7935}
7936
7937
7938/**
7939 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7940 */
7941HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7942{
7943 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7944
7945 /** @todo The guest has likely hit a contended spinlock. We might want to
7946     *        poke or schedule a different guest VCPU. */
7947 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7948 if (RT_SUCCESS(rc))
7949 return VINF_EM_RAW_INTERRUPT;
7950
7951 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7952 return rc;
7953}
7954
7955
7956/**
7957 * VM-exit handler for when the TPR value is lowered below the specified
7958 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7959 */
7960HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7961{
7962 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7963 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7964
7965 /*
7966 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7967 * We'll re-evaluate pending interrupts and inject them before the next VM
7968 * entry so we can just continue execution here.
7969 */
7970 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7971 return VINF_SUCCESS;
7972}
7973
7974
7975/**
7976 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7977 * VM-exit.
7978 *
7979 * @retval VINF_SUCCESS when guest execution can continue.
7980 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7981 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7982 * incompatible guest state for VMX execution (real-on-v86 case).
7983 */
7984HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7985{
7986 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7987 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7988
7989 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7990 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7991 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7992
7993 VBOXSTRICTRC rcStrict;
7994 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7995 uint64_t const uExitQual = pVmxTransient->uExitQual;
7996 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
7997 switch (uAccessType)
7998 {
7999 /*
8000 * MOV to CRx.
8001 */
8002 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8003 {
8004 /*
8005 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8006             * changes certain bits even in CR0 or CR4 (not just CR3). We are currently fine
8007 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8008 * PAE PDPTEs as well.
8009 */
8010 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8011 AssertRCReturn(rc, rc);
8012
8013 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8014#ifndef IN_NEM_DARWIN
8015 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8016#endif
8017 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8018 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8019
8020 /*
8021             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8022 * - When nested paging isn't used.
8023 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8024 * - We are executing in the VM debug loop.
8025 */
8026#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8027# ifndef IN_NEM_DARWIN
8028 Assert( iCrReg != 3
8029 || !VM_IS_VMX_NESTED_PAGING(pVM)
8030 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8031 || pVCpu->hmr0.s.fUsingDebugLoop);
8032# else
8033 Assert( iCrReg != 3
8034 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8035# endif
8036#endif
8037
8038 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8039 Assert( iCrReg != 8
8040 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8041
8042 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8043 AssertMsg( rcStrict == VINF_SUCCESS
8044 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8045
8046#ifndef IN_NEM_DARWIN
8047 /*
8048 * This is a kludge for handling switches back to real mode when we try to use
8049 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8050 * deal with special selector values, so we have to return to ring-3 and run
8051 * there till the selector values are V86 mode compatible.
8052 *
8053 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8054 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8055 * this function.
8056 */
8057 if ( iCrReg == 0
8058 && rcStrict == VINF_SUCCESS
8059 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8060 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8061 && (uOldCr0 & X86_CR0_PE)
8062 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8063 {
8064 /** @todo Check selectors rather than returning all the time. */
8065 Assert(!pVmxTransient->fIsNestedGuest);
8066 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8067 rcStrict = VINF_EM_RESCHEDULE_REM;
8068 }
8069#endif
8070
8071 break;
8072 }
8073
8074 /*
8075 * MOV from CRx.
8076 */
8077 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8078 {
8079 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8080 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8081
8082 /*
8083             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8084 * - When nested paging isn't used.
8085 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8086 * - We are executing in the VM debug loop.
8087 */
8088#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8089# ifndef IN_NEM_DARWIN
8090 Assert( iCrReg != 3
8091 || !VM_IS_VMX_NESTED_PAGING(pVM)
8092 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8093 || pVCpu->hmr0.s.fLeaveDone);
8094# else
8095 Assert( iCrReg != 3
8096 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8097# endif
8098#endif
8099
8100 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8101 Assert( iCrReg != 8
8102 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8103
8104 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8105 break;
8106 }
8107
8108 /*
8109 * CLTS (Clear Task-Switch Flag in CR0).
8110 */
8111 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8112 {
8113 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8114 break;
8115 }
8116
8117 /*
8118 * LMSW (Load Machine-Status Word into CR0).
8119 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8120 */
8121 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8122 {
8123 RTGCPTR GCPtrEffDst;
8124 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8125 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8126 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8127 if (fMemOperand)
8128 {
8129 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8130 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8131 }
8132 else
8133 GCPtrEffDst = NIL_RTGCPTR;
8134 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8135 break;
8136 }
8137
8138 default:
8139 {
8140 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8141 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8142 }
8143 }
8144
8145 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8146 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8147 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8148
8149 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8150 NOREF(pVM);
8151 return rcStrict;
8152}
8153
8154
8155/**
8156 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8157 * VM-exit.
8158 */
8159HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8160{
8161 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8162 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8163
8164 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8165 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8166 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8167 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8168 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8169 | CPUMCTX_EXTRN_EFER);
8170 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8171 AssertRCReturn(rc, rc);
8172
8173    /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8174 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8175 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8176 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8177 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8178 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8179 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
8180 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8181
8182 /*
8183 * Update exit history to see if this exit can be optimized.
8184 */
8185 VBOXSTRICTRC rcStrict;
8186 PCEMEXITREC pExitRec = NULL;
8187 if ( !fGstStepping
8188 && !fDbgStepping)
8189 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8190 !fIOString
8191 ? !fIOWrite
8192 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8193 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8194 : !fIOWrite
8195 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8196 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8197 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8198 if (!pExitRec)
8199 {
8200 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8201 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
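            /* Both tables are indexed by the exit-qualification size field: 0 = byte, 1 = word,
               3 = dword; index 2 is not a valid encoding, as the AssertReturn above guarantees. */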
8202
8203 uint32_t const cbValue = s_aIOSizes[uIOSize];
8204 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8205 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8206 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8207 if (fIOString)
8208 {
8209 /*
8210 * INS/OUTS - I/O String instruction.
8211 *
8212 * Use instruction-information if available, otherwise fall back on
8213 * interpreting the instruction.
8214 */
8215 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8216 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8217 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8218 if (fInsOutsInfo)
8219 {
8220 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8221 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8222 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8223 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8224 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8225 if (fIOWrite)
8226 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8227 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8228 else
8229 {
8230 /*
8231 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8232 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8233 * See Intel Instruction spec. for "INS".
8234 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8235 */
8236 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8237 }
8238 }
8239 else
8240 rcStrict = IEMExecOne(pVCpu);
8241
8242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8243 fUpdateRipAlready = true;
8244 }
8245 else
8246 {
8247 /*
8248 * IN/OUT - I/O instruction.
8249 */
8250 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8251 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8252 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
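                /* uAndVal limits the access to the operand-sized low bits of EAX, e.g. for a
                   byte-sized IN only AL is updated: eax = (eax & ~0xff) | (u32Result & 0xff). */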
8253 if (fIOWrite)
8254 {
8255 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8256 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8257#ifndef IN_NEM_DARWIN
8258 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8259 && !pCtx->eflags.Bits.u1TF)
8260 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8261#endif
8262 }
8263 else
8264 {
8265 uint32_t u32Result = 0;
8266 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8267 if (IOM_SUCCESS(rcStrict))
8268 {
8269 /* Save result of I/O IN instr. in AL/AX/EAX. */
8270 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8271 }
8272#ifndef IN_NEM_DARWIN
8273 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8274 && !pCtx->eflags.Bits.u1TF)
8275 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8276#endif
8277 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8278 }
8279 }
8280
8281 if (IOM_SUCCESS(rcStrict))
8282 {
8283 if (!fUpdateRipAlready)
8284 {
8285 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8286 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8287 }
8288
8289 /*
8290             * INS/OUTS with a REP prefix updates RFLAGS; failing to account for this has been observed
8291             * as a triple-fault guru while booting a Fedora 17 64-bit guest.
8292 *
8293 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8294 */
8295 if (fIOString)
8296 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8297
8298 /*
8299 * If any I/O breakpoints are armed, we need to check if one triggered
8300 * and take appropriate action.
8301 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8302 */
8303 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8304 AssertRCReturn(rc, rc);
8305
8306 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8307 * execution engines about whether hyper BPs and such are pending. */
8308 uint32_t const uDr7 = pCtx->dr[7];
8309 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8310 && X86_DR7_ANY_RW_IO(uDr7)
8311 && (pCtx->cr4 & X86_CR4_DE))
8312 || DBGFBpIsHwIoArmed(pVM)))
8313 {
8314 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8315
8316#ifndef IN_NEM_DARWIN
8317 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8318 VMMRZCallRing3Disable(pVCpu);
8319 HM_DISABLE_PREEMPT(pVCpu);
8320
8321 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8322
8323 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8324 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8325 {
8326 /* Raise #DB. */
8327 if (fIsGuestDbgActive)
8328 ASMSetDR6(pCtx->dr[6]);
8329 if (pCtx->dr[7] != uDr7)
8330 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8331
8332 vmxHCSetPendingXcptDB(pVCpu);
8333 }
8334 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8335 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8336 else if ( rcStrict2 != VINF_SUCCESS
8337 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8338 rcStrict = rcStrict2;
8339 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8340
8341 HM_RESTORE_PREEMPT();
8342 VMMRZCallRing3Enable(pVCpu);
8343#else
8344 /** @todo */
8345#endif
8346 }
8347 }
8348
8349#ifdef VBOX_STRICT
8350 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8351 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8352 Assert(!fIOWrite);
8353 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8354 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8355 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8356 Assert(fIOWrite);
8357 else
8358 {
8359# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8360 * statuses, that the VMM device and some others may return. See
8361 * IOM_SUCCESS() for guidance. */
8362 AssertMsg( RT_FAILURE(rcStrict)
8363 || rcStrict == VINF_SUCCESS
8364 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8365 || rcStrict == VINF_EM_DBG_BREAKPOINT
8366 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8367 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8368# endif
8369 }
8370#endif
8371 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8372 }
8373 else
8374 {
8375 /*
8376 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8377 */
8378 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8379 AssertRCReturn(rc2, rc2);
8380 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8381 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8382 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8383 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8384 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8385 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8386
8387 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8388 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8389
8390 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8391 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8392 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8393 }
8394 return rcStrict;
8395}
8396
8397
8398/**
8399 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8400 * VM-exit.
8401 */
8402HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8403{
8404 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8405
8406    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8407 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8408 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8409 {
8410 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8411 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8412 {
8413 uint32_t uErrCode;
8414 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8415 {
8416 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8417 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8418 }
8419 else
8420 uErrCode = 0;
8421
8422 RTGCUINTPTR GCPtrFaultAddress;
8423 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8424 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8425 else
8426 GCPtrFaultAddress = 0;
8427
8428 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8429
8430 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8431 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8432
8433 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8434 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8435 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8436 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8437 }
8438 }
8439
8440 /* Fall back to the interpreter to emulate the task-switch. */
8441 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8442 return VERR_EM_INTERPRETER;
8443}
8444
8445
8446/**
8447 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8448 */
8449HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8450{
8451 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8452
8453 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8454 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8455 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8456 AssertRC(rc);
8457 return VINF_EM_DBG_STEPPED;
8458}
8459
8460
8461/**
8462 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8463 */
8464HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8465{
8466 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8467 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8468
8469 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8470 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8471 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8472 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8473 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8474
8475 /*
8476 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8477 */
8478 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8479 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8480 {
8481 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8482 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8483 {
8484 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8485 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8486 }
8487 }
8488 else
8489 {
8490 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8491 return rcStrict;
8492 }
8493
8494 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8495 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8496 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8497 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8498 AssertRCReturn(rc, rc);
8499
8500    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8501 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8502 switch (uAccessType)
8503 {
8504#ifndef IN_NEM_DARWIN
8505 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8506 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8507 {
8508 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8509 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8510 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8511
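                /* Reconstruct the guest-physical address of the access from the guest APIC-base MSR
                   (page base) plus the page offset reported in the exit qualification, then let IOM
                   handle it as an MMIO access. */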
8512 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8513 GCPhys &= PAGE_BASE_GC_MASK;
8514 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8515 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8516 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8517
8518 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8519 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8520 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8521 if ( rcStrict == VINF_SUCCESS
8522 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8523 || rcStrict == VERR_PAGE_NOT_PRESENT)
8524 {
8525 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8526 | HM_CHANGED_GUEST_APIC_TPR);
8527 rcStrict = VINF_SUCCESS;
8528 }
8529 break;
8530 }
8531#else
8532 /** @todo */
8533#endif
8534
8535 default:
8536 {
8537 Log4Func(("uAccessType=%#x\n", uAccessType));
8538 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8539 break;
8540 }
8541 }
8542
8543 if (rcStrict != VINF_SUCCESS)
8544 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8545 return rcStrict;
8546}
8547
8548
8549/**
8550 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8551 * VM-exit.
8552 */
8553HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8554{
8555 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8556 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8557
8558 /*
8559 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8560 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8561 * must emulate the MOV DRx access.
8562 */
8563 if (!pVmxTransient->fIsNestedGuest)
8564 {
8565 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8566 if (pVmxTransient->fWasGuestDebugStateActive)
8567 {
8568 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8569 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8570 }
8571
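            /* Not single-stepping and the hypervisor debug state wasn't active: stop intercepting MOV DRx and hand the debug registers to the guest. */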
8572 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8573 && !pVmxTransient->fWasHyperDebugStateActive)
8574 {
8575 Assert(!DBGFIsStepping(pVCpu));
8576 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8577
8578 /* Don't intercept MOV DRx any more. */
8579 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8580 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8581 AssertRC(rc);
8582
8583#ifndef IN_NEM_DARWIN
8584 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
8585 VMMRZCallRing3Disable(pVCpu);
8586 HM_DISABLE_PREEMPT(pVCpu);
8587
8588 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8589 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8590 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8591
8592 HM_RESTORE_PREEMPT();
8593 VMMRZCallRing3Enable(pVCpu);
8594#else
8595 /** @todo */
8596#endif
8597
8598#ifdef VBOX_WITH_STATISTICS
8599 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8600 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8601 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8602 else
8603 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8604#endif
8605 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8606 return VINF_SUCCESS;
8607 }
8608 }
8609
8610 /*
8611 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
8612 * The EFER MSR is always up-to-date.
8613 * Update the segment registers and DR7 from the CPU.
8614 */
8615 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8616 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8617 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8618 AssertRCReturn(rc, rc);
8619 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8620
8621 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8622 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8623 {
8624 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8625 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8626 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8627 if (RT_SUCCESS(rc))
8628 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8629 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8630 }
8631 else
8632 {
8633 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8634 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8635 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8636 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8637 }
8638
8639 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8640 if (RT_SUCCESS(rc))
8641 {
8642 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8643 AssertRCReturn(rc2, rc2);
8644 return VINF_SUCCESS;
8645 }
8646 return rc;
8647}
8648
8649
8650/**
8651 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8652 * Conditional VM-exit.
8653 */
8654HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8655{
8656 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8657
8658#ifndef IN_NEM_DARWIN
8659 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8660
8661 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8662 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8663 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8664 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8665 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8666
8667 /*
8668 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8669 */
8670 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8671 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8672 {
8673 /*
8674 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8675 * instruction emulation to inject the original event. Otherwise, injecting the original event
8676 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8677 */
8678 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8679 { /* likely */ }
8680 else
8681 {
8682 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8683#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8684 /** @todo NSTVMX: Think about how this should be handled. */
8685 if (pVmxTransient->fIsNestedGuest)
8686 return VERR_VMX_IPE_3;
8687#endif
8688 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8689 }
8690 }
8691 else
8692 {
8693 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8694 return rcStrict;
8695 }
8696
8697 /*
8698 * Get sufficient state and update the exit history entry.
8699 */
8700 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8701 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8702 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8703 AssertRCReturn(rc, rc);
8704
8705 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
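    /* Consult the exit history: if this RIP exits frequently (or needs probing), handle it via EMHistoryExec below instead of per-access emulation. */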
8706 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8707 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8708 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8709 if (!pExitRec)
8710 {
8711 /*
8712 * If we succeed, resume guest execution.
8713 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8714 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8715 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8716 * weird case. See @bugref{6043}.
8717 */
8718 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8719 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8720/** @todo bird: We can probably just go straight to IOM here and assume that
8721 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8722 * well. However, we need to address that aliasing workarounds that
8723 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8724 *
8725 * Might also be interesting to see if we can get this done more or
8726 * less locklessly inside IOM. Need to consider the lookup table
8727 * updating and use a bit more carefully first (or do all updates via
8728 * rendezvous) */
8729 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8730 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8731 if ( rcStrict == VINF_SUCCESS
8732 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8733 || rcStrict == VERR_PAGE_NOT_PRESENT)
8734 {
8735 /* Successfully handled MMIO operation. */
8736 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8737 | HM_CHANGED_GUEST_APIC_TPR);
8738 rcStrict = VINF_SUCCESS;
8739 }
8740 }
8741 else
8742 {
8743 /*
8744 * Frequent exit or something needing probing. Call EMHistoryExec.
8745 */
8746 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8747 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8748
8749 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8750 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8751
8752 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8753 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8754 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8755 }
8756 return rcStrict;
8757#else
8758 AssertFailed();
8759 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8760#endif
8761}
8762
8763
8764/**
8765 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8766 * VM-exit.
8767 */
8768HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8769{
8770 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8771#ifndef IN_NEM_DARWIN
8772 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8773
8774 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8775 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8776 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8777 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8778 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8779 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8780
8781 /*
8782 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8783 */
8784 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8785 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8786 {
8787 /*
8788 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8789 * we shall resolve the nested #PF and re-inject the original event.
8790 */
8791 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8792 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8793 }
8794 else
8795 {
8796 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8797 return rcStrict;
8798 }
8799
8800 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8801 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8802 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8803 AssertRCReturn(rc, rc);
8804
8805 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8806 uint64_t const uExitQual = pVmxTransient->uExitQual;
8807 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8808
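    /* Translate the EPT violation exit qualification bits into a page-fault error code for TRPM/PGM. */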
8809 RTGCUINT uErrorCode = 0;
8810 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8811 uErrorCode |= X86_TRAP_PF_ID;
8812 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8813 uErrorCode |= X86_TRAP_PF_RW;
8814 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8815 uErrorCode |= X86_TRAP_PF_P;
8816
8817 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8818 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8819
8820 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8821
8822 /*
8823 * Handle the pagefault trap for the nested shadow table.
8824 */
8825 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8826 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8827 TRPMResetTrap(pVCpu);
8828
8829 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8830 if ( rcStrict == VINF_SUCCESS
8831 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8832 || rcStrict == VERR_PAGE_NOT_PRESENT)
8833 {
8834 /* Successfully synced our nested page tables. */
8835 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8836 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8837 return VINF_SUCCESS;
8838 }
8839#else
8840 PVM pVM = pVCpu->CTX_SUFF(pVM);
8841 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8842 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8843 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8844 vmxHCImportGuestRip(pVCpu);
8845 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8846
8847 /*
8848 * Ask PGM for information about the given GCPhys. We need to check if we're
8849 * out of sync first.
8850 */
8851 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8852 PGMPHYSNEMPAGEINFO Info;
8853 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8854 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8855 if (RT_SUCCESS(rc))
8856 {
8857 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8858 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8859 {
8860 if (State.fCanResume)
8861 {
8862 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8863 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8864 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8865 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8866 State.fDidSomething ? "" : " no-change"));
8867 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8868 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8869 return VINF_SUCCESS;
8870 }
8871 }
8872
8873 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8874 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8875 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8876 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8877 State.fDidSomething ? "" : " no-change"));
8878 }
8879 else
8880 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8881 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8882 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8883
8884 /*
8885 * Emulate the memory access, either access handler or special memory.
8886 */
8887 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8888 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8889 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8890 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8891 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8892
8893 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8894 AssertRCReturn(rc, rc);
8895
8896 VBOXSTRICTRC rcStrict;
8897 if (!pExitRec)
8898 rcStrict = IEMExecOne(pVCpu);
8899 else
8900 {
8901 /* Frequent access or probing. */
8902 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8903 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8904 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8905 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8906 }
8907
8908 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8909#endif
8910
8911 Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8912 return rcStrict;
8913}
8914
8915
8916#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8917/**
8918 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8919 */
8920HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8921{
8922 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8923
8924 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8925 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8926 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8927 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8928 | CPUMCTX_EXTRN_HWVIRT
8929 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8930 AssertRCReturn(rc, rc);
8931
8932 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8933
8934 VMXVEXITINFO ExitInfo;
8935 RT_ZERO(ExitInfo);
8936 ExitInfo.uReason = pVmxTransient->uExitReason;
8937 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8938 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8939 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8940 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8941
8942 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8943 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8944 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8945 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8946 {
8947 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8948 rcStrict = VINF_SUCCESS;
8949 }
8950 return rcStrict;
8951}
8952
8953
8954/**
8955 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8956 */
8957HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8958{
8959 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8960
8961 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
8962 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8963 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8964 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8965 AssertRCReturn(rc, rc);
8966
8967 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8968
8969 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8970 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8971 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8972 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8973 {
8974 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8975 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8976 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8977 }
8978 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8979 return rcStrict;
8980}
8981
8982
8983/**
8984 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8985 */
8986HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8987{
8988 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8989
8990 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8991 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8992 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8993 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8994 | CPUMCTX_EXTRN_HWVIRT
8995 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8996 AssertRCReturn(rc, rc);
8997
8998 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8999
9000 VMXVEXITINFO ExitInfo;
9001 RT_ZERO(ExitInfo);
9002 ExitInfo.uReason = pVmxTransient->uExitReason;
9003 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9004 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9005 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9006 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9007
9008 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9009 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9010 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9011 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9012 {
9013 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9014 rcStrict = VINF_SUCCESS;
9015 }
9016 return rcStrict;
9017}
9018
9019
9020/**
9021 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9022 */
9023HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9024{
9025 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9026
9027 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9028 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9029 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9030 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9031 | CPUMCTX_EXTRN_HWVIRT
9032 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9033 AssertRCReturn(rc, rc);
9034
9035 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9036
9037 VMXVEXITINFO ExitInfo;
9038 RT_ZERO(ExitInfo);
9039 ExitInfo.uReason = pVmxTransient->uExitReason;
9040 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9041 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9042 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9043 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9044
9045 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9046 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9047 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9048 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9049 {
9050 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9051 rcStrict = VINF_SUCCESS;
9052 }
9053 return rcStrict;
9054}
9055
9056
9057/**
9058 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9059 */
9060HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9061{
9062 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9063
9064 /*
9065 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
9066 * thus might not need to import the shadow VMCS state. It is safer to do so anyway,
9067 * in case code elsewhere dares look at unsynced VMCS fields.
9068 */
9069 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9070 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9071 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9072 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9073 | CPUMCTX_EXTRN_HWVIRT
9074 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9075 AssertRCReturn(rc, rc);
9076
9077 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9078
9079 VMXVEXITINFO ExitInfo;
9080 RT_ZERO(ExitInfo);
9081 ExitInfo.uReason = pVmxTransient->uExitReason;
9082 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9083 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9084 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9085 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9086 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9087
9088 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9089 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9090 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9091 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9092 {
9093 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9094 rcStrict = VINF_SUCCESS;
9095 }
9096 return rcStrict;
9097}
9098
9099
9100/**
9101 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9102 */
9103HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9104{
9105 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9106
9107 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9108 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9109 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9110 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9111 AssertRCReturn(rc, rc);
9112
9113 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9114
9115 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9116 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9117 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9118 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9119 {
9120 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9121 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9122 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9123 }
9124 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9125 return rcStrict;
9126}
9127
9128
9129/**
9130 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9131 */
9132HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9133{
9134 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9135
9136 /*
9137 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
9138 * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and
9139 * flags re-loading the entire shadow VMCS, so we should save the entire shadow VMCS here.
9140 */
9141 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9142 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9143 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9144 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9145 | CPUMCTX_EXTRN_HWVIRT
9146 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9147 AssertRCReturn(rc, rc);
9148
9149 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9150
9151 VMXVEXITINFO ExitInfo;
9152 RT_ZERO(ExitInfo);
9153 ExitInfo.uReason = pVmxTransient->uExitReason;
9154 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9155 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9156 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9157 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9158 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9159
9160 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9161 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9162 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9163 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9164 {
9165 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9166 rcStrict = VINF_SUCCESS;
9167 }
9168 return rcStrict;
9169}
9170
9171
9172/**
9173 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9174 */
9175HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9176{
9177 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9178
9179 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9180 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9181 | CPUMCTX_EXTRN_HWVIRT
9182 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9183 AssertRCReturn(rc, rc);
9184
9185 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9186
9187 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9188 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9189 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9190 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9191 {
9192 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9193 rcStrict = VINF_SUCCESS;
9194 }
9195 return rcStrict;
9196}
9197
9198
9199/**
9200 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9201 */
9202HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9203{
9204 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9205
9206 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9207 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9208 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9209 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9210 | CPUMCTX_EXTRN_HWVIRT
9211 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9212 AssertRCReturn(rc, rc);
9213
9214 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9215
9216 VMXVEXITINFO ExitInfo;
9217 RT_ZERO(ExitInfo);
9218 ExitInfo.uReason = pVmxTransient->uExitReason;
9219 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9220 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9221 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9222 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9223
9224 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9225 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9226 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9227 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9228 {
9229 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9230 rcStrict = VINF_SUCCESS;
9231 }
9232 return rcStrict;
9233}
9234
9235
9236/**
9237 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9238 */
9239HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9240{
9241 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9242
9243 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9244 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9245 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9246 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9247 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9248 AssertRCReturn(rc, rc);
9249
9250 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9251
9252 VMXVEXITINFO ExitInfo;
9253 RT_ZERO(ExitInfo);
9254 ExitInfo.uReason = pVmxTransient->uExitReason;
9255 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9256 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9257 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9258 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9259
9260 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9261 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9262 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9263 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9264 {
9265 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9266 rcStrict = VINF_SUCCESS;
9267 }
9268 return rcStrict;
9269}
9270
9271
9272# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9273/**
9274 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9275 */
9276HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9277{
9278 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9279
9280 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9281 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9282 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9283 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9284 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9285 AssertRCReturn(rc, rc);
9286
9287 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9288
9289 VMXVEXITINFO ExitInfo;
9290 RT_ZERO(ExitInfo);
9291 ExitInfo.uReason = pVmxTransient->uExitReason;
9292 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9293 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9294 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9295 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9296
9297 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9298 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9299 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9300 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9301 {
9302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9303 rcStrict = VINF_SUCCESS;
9304 }
9305 return rcStrict;
9306}
9307# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9308#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9309/** @} */
9310
9311
9312#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9313/** @name Nested-guest VM-exit handlers.
9314 * @{
9315 */
9316/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9317/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9318/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9319
9320/**
9321 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9322 * Conditional VM-exit.
9323 */
9324HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9325{
9326 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9327
9328 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9329
9330 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9331 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9332 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9333
9334 switch (uExitIntType)
9335 {
9336#ifndef IN_NEM_DARWIN
9337 /*
9338 * Physical NMIs:
9339 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
9340 */
9341 case VMX_EXIT_INT_INFO_TYPE_NMI:
9342 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9343#endif
9344
9345 /*
9346 * Hardware exceptions,
9347 * Software exceptions,
9348 * Privileged software exceptions:
9349 * Figure out if the exception must be delivered to the guest or the nested-guest.
9350 */
9351 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9352 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9353 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9354 {
9355 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9356 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9357 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9358 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9359
9360 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
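            /* Check the nested-guest exception bitmap (and, for #PF, the error-code match controls) to see whether this exception must be reflected as a VM-exit to the nested hypervisor. */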
9361 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9362 pVmxTransient->uExitIntErrorCode);
9363 if (fIntercept)
9364 {
9365 /* Exit qualification is required for debug and page-fault exceptions. */
9366 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9367
9368 /*
9369 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9370 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9371 * length. However, if delivery of a software interrupt, software exception or privileged
9372 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9373 */
9374 VMXVEXITINFO ExitInfo;
9375 RT_ZERO(ExitInfo);
9376 ExitInfo.uReason = pVmxTransient->uExitReason;
9377 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9378 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9379
9380 VMXVEXITEVENTINFO ExitEventInfo;
9381 RT_ZERO(ExitEventInfo);
9382 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9383 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9384 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9385 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9386
9387#ifdef DEBUG_ramshankar
9388 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9389 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9390 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9391 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9392 {
9393 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9394 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9395 }
9396#endif
9397 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9398 }
9399
9400 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9401 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9402 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9403 }
9404
9405 /*
9406 * Software interrupts:
9407 * VM-exits cannot be caused by software interrupts.
9408 *
9409 * External interrupts:
9410 * This should only happen when "acknowledge external interrupts on VM-exit"
9411 * control is set. However, we never set this when executing a guest or
9412 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9413 * the guest.
9414 */
9415 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9416 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9417 default:
9418 {
9419 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9420 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9421 }
9422 }
9423}
9424
9425
9426/**
9427 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9428 * Unconditional VM-exit.
9429 */
9430HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9431{
9432 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9433 return IEMExecVmxVmexitTripleFault(pVCpu);
9434}
9435
9436
9437/**
9438 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9439 */
9440HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9441{
9442 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9443
9444 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9445 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9446 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9447}
9448
9449
9450/**
9451 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9452 */
9453HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9454{
9455 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9456
9457 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9458 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9459 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9460}
9461
9462
9463/**
9464 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9465 * Unconditional VM-exit.
9466 */
9467HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9468{
9469 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9470
9471 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9472 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9473 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9474 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9475
9476 VMXVEXITINFO ExitInfo;
9477 RT_ZERO(ExitInfo);
9478 ExitInfo.uReason = pVmxTransient->uExitReason;
9479 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9480 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9481
9482 VMXVEXITEVENTINFO ExitEventInfo;
9483 RT_ZERO(ExitEventInfo);
9484 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9485 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9486 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9487}
9488
9489
9490/**
9491 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9492 */
9493HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9494{
9495 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9496
9497 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9498 {
9499 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9500 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9501 }
9502 return vmxHCExitHlt(pVCpu, pVmxTransient);
9503}
9504
9505
9506/**
9507 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9508 */
9509HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9510{
9511 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9512
9513 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9514 {
9515 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9516 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9517
9518 VMXVEXITINFO ExitInfo;
9519 RT_ZERO(ExitInfo);
9520 ExitInfo.uReason = pVmxTransient->uExitReason;
9521 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9522 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9523 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9524 }
9525 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9526}
9527
9528
9529/**
9530 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9531 */
9532HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9533{
9534 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9535
9536 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9537 {
9538 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9539 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9540 }
9541 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9542}
9543
9544
9545/**
9546 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9547 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9548 */
9549HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9550{
9551 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9552
9553 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9554 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9555
9556 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9557
9558 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9559 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9560 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9561
9562 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
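    /* Outside 64-bit mode only the lower 32 bits of the register specify the VMCS field encoding. */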
9563 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9564 u64VmcsField &= UINT64_C(0xffffffff);
9565
9566 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9567 {
9568 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9569 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9570
9571 VMXVEXITINFO ExitInfo;
9572 RT_ZERO(ExitInfo);
9573 ExitInfo.uReason = pVmxTransient->uExitReason;
9574 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9575 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9576 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9577 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9578 }
9579
9580 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9581 return vmxHCExitVmread(pVCpu, pVmxTransient);
9582 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9583}
9584
9585
9586/**
9587 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9588 */
9589HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9590{
9591 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9592
9593 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9594 {
9595 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9596 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9597 }
9598
9599 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9600}
9601
9602
9603/**
9604 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9605 * Conditional VM-exit.
9606 */
9607HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9608{
9609 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9610
9611 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9612 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9613
9614 VBOXSTRICTRC rcStrict;
9615 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9616 switch (uAccessType)
9617 {
9618 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9619 {
9620 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9621 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9622 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9623 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9624
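            /* Determine whether the nested hypervisor intercepts writes to this control register. */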
9625 bool fIntercept;
9626 switch (iCrReg)
9627 {
9628 case 0:
9629 case 4:
9630 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9631 break;
9632
9633 case 3:
9634 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9635 break;
9636
9637 case 8:
9638 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9639 break;
9640
9641 default:
9642 fIntercept = false;
9643 break;
9644 }
9645 if (fIntercept)
9646 {
9647 VMXVEXITINFO ExitInfo;
9648 RT_ZERO(ExitInfo);
9649 ExitInfo.uReason = pVmxTransient->uExitReason;
9650 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9651 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9652 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9653 }
9654 else
9655 {
9656 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9657 AssertRCReturn(rc, rc);
9658 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9659 }
9660 break;
9661 }
9662
9663 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9664 {
9665 /*
9666 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9667 * CR2 reads do not cause a VM-exit.
9668 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9669 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9670 */
9671 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9672 if ( iCrReg == 3
9673 || iCrReg == 8)
9674 {
9675 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9676 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9677 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9678 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9679 {
9680 VMXVEXITINFO ExitInfo;
9681 RT_ZERO(ExitInfo);
9682 ExitInfo.uReason = pVmxTransient->uExitReason;
9683 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9684 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9685 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9686 }
9687 else
9688 {
9689 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9690 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9691 }
9692 }
9693 else
9694 {
9695 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9696 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9697 }
9698 break;
9699 }
9700
9701 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9702 {
9703 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9704 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9705 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
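            /* CLTS causes a VM-exit only when CR0.TS is owned by the nested hypervisor (set in the CR0 guest/host mask) and the CR0 read shadow has TS set. */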
9706 if ( (uGstHostMask & X86_CR0_TS)
9707 && (uReadShadow & X86_CR0_TS))
9708 {
9709 VMXVEXITINFO ExitInfo;
9710 RT_ZERO(ExitInfo);
9711 ExitInfo.uReason = pVmxTransient->uExitReason;
9712 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9713 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9714 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9715 }
9716 else
9717 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9718 break;
9719 }
9720
9721 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9722 {
9723 RTGCPTR GCPtrEffDst;
9724 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9725 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9726 if (fMemOperand)
9727 {
9728 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9729 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9730 }
9731 else
9732 GCPtrEffDst = NIL_RTGCPTR;
9733
9734 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9735 {
9736 VMXVEXITINFO ExitInfo;
9737 RT_ZERO(ExitInfo);
9738 ExitInfo.uReason = pVmxTransient->uExitReason;
9739 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9740 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9741 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9742 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9743 }
9744 else
9745 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9746 break;
9747 }
9748
9749 default:
9750 {
9751 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9752 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9753 }
9754 }
9755
9756 if (rcStrict == VINF_IEM_RAISED_XCPT)
9757 {
9758 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9759 rcStrict = VINF_SUCCESS;
9760 }
9761 return rcStrict;
9762}
9763
9764
9765/**
9766 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9767 * Conditional VM-exit.
9768 */
9769HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9770{
9771 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9772
9773 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9774 {
9775 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9776 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9777
9778 VMXVEXITINFO ExitInfo;
9779 RT_ZERO(ExitInfo);
9780 ExitInfo.uReason = pVmxTransient->uExitReason;
9781 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9782 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9783 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9784 }
9785 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9786}
9787
9788
9789/**
9790 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9791 * Conditional VM-exit.
9792 */
9793HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9794{
9795 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9796
9797 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9798
9799 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9800 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9801 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9802
9803 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9804 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9805 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9806 {
9807 /*
9808 * IN/OUT instruction:
9809 * - Provides VM-exit instruction length.
9810 *
9811 * INS/OUTS instruction:
9812 * - Provides VM-exit instruction length.
9813 * - Provides Guest-linear address.
9814 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9815 */
9816 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
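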
9817 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9818
9819 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9820 pVmxTransient->ExitInstrInfo.u = 0;
9821 pVmxTransient->uGuestLinearAddr = 0;
9822
9823 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9824 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9825 if (fIOString)
9826 {
9827 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9828 if (fVmxInsOutsInfo)
9829 {
9830 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9831 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9832 }
9833 }
9834
9835 VMXVEXITINFO ExitInfo;
9836 RT_ZERO(ExitInfo);
9837 ExitInfo.uReason = pVmxTransient->uExitReason;
9838 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9839 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9840 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9841 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9842 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9843 }
9844 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9845}
9846
9847
9848/**
9849 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9850 */
9851HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9852{
9853 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9854
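    /* Consult the nested-guest MSR bitmap for the MSR in ECX if MSR bitmaps are in use; otherwise every RDMSR causes a VM-exit. */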
9855 uint32_t fMsrpm;
9856 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9857 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9858 else
9859 fMsrpm = VMXMSRPM_EXIT_RD;
9860
9861 if (fMsrpm & VMXMSRPM_EXIT_RD)
9862 {
9863 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9864 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9865 }
9866 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9867}
9868
9869
9870/**
9871 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9872 */
9873HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9874{
9875 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9876
9877 uint32_t fMsrpm;
9878 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9879 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9880 else
9881 fMsrpm = VMXMSRPM_EXIT_WR;
9882
9883 if (fMsrpm & VMXMSRPM_EXIT_WR)
9884 {
9885 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9886 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9887 }
9888 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9889}
9890
9891
9892/**
9893 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9894 */
9895HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9896{
9897 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9898
9899 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9900 {
9901 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9902 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9903 }
9904 return vmxHCExitMwait(pVCpu, pVmxTransient);
9905}
9906
9907
9908/**
9909 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9910 * VM-exit.
9911 */
9912HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9913{
9914 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9915
9916 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
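    /* MTF VM-exits are trap-like; the guest pending debug exceptions are supplied as part of the VM-exit information. */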
9917 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9918 VMXVEXITINFO ExitInfo;
9919 RT_ZERO(ExitInfo);
9920 ExitInfo.uReason = pVmxTransient->uExitReason;
9921 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9922 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9923}
9924
9925
9926/**
9927 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9928 */
9929HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9930{
9931 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9932
9933 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9934 {
9935 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9936 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9937 }
9938 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9939}
9940
9941
9942/**
9943 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9944 */
9945HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9946{
9947 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9948
9949 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9950 * PAUSE when executing a nested-guest? If it does not, we would not need
9951 * to check for the intercepts here. Just call VM-exit... */
9952
9953 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9954 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9955 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9956 {
9957 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9958 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9959 }
9960 return vmxHCExitPause(pVCpu, pVmxTransient);
9961}
9962
9963
9964/**
9965 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9966 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9967 */
9968HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9969{
9970 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9971
9972 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9973 {
9974 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9975 VMXVEXITINFO ExitInfo;
9976 RT_ZERO(ExitInfo);
9977 ExitInfo.uReason = pVmxTransient->uExitReason;
9978 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9979 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9980 }
9981 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9982}
9983
9984
9985/**
9986 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9987 * VM-exit.
9988 */
9989HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9990{
9991 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9992
9993 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9994 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9995 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9996 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9997
9998 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
9999
10000 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10001 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10002
10003 VMXVEXITINFO ExitInfo;
10004 RT_ZERO(ExitInfo);
10005 ExitInfo.uReason = pVmxTransient->uExitReason;
10006 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10007 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10008
10009 VMXVEXITEVENTINFO ExitEventInfo;
10010 RT_ZERO(ExitEventInfo);
10011 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10012 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10013 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10014}
10015
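/*
 * A short note on the marshalling pattern used above: exits that can be taken
 * while an event is being delivered (hence the IDT-vectoring reads) pack the
 * basic exit data into a VMXVEXITINFO and the interrupted-event data into a
 * VMXVEXITEVENTINFO, and both are then handed to IEM so it can perform the
 * corresponding VM-exit on behalf of the guest hypervisor.
 */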
10016
10017/**
10018 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10019 * Conditional VM-exit.
10020 */
10021HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10022{
10023 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10024
10025 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10026 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10027 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10028}
10029
10030
10031/**
10032 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10033 * Conditional VM-exit.
10034 */
10035HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10036{
10037 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10038
10039 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10040 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10041 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10042}
10043
10044
10045/**
10046 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10047 */
10048HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10049{
10050 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10051
10052 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10053 {
10054 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10055 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10056 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10057 }
10058 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10059}
10060
10061
10062/**
10063 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10064 */
10065HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10066{
10067 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10068
10069 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10070 {
10071 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10072 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10073 }
10074 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10075}
10076
10077
10078/**
10079 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10080 */
10081HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10082{
10083 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10084
10085 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10086 {
10087 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10088 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10089 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10090 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10091
10092 VMXVEXITINFO ExitInfo;
10093 RT_ZERO(ExitInfo);
10094 ExitInfo.uReason = pVmxTransient->uExitReason;
10095 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10096 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10097 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10098 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10099 }
10100 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10101}
10102
10103
10104/**
10105 * Nested-guest VM-exit handler for invalid-guest state
10106 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10107 */
10108HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10109{
10110 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10111
10112 /*
10113 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10114 * So if it does happen, it possibly indicates a bug in the hardware-assisted VMX code.
10115 * Handle it as if the outer guest were in an invalid guest state.
10116 *
10117 * When the fast path is implemented, this should be changed to cause the corresponding
10118 * nested-guest VM-exit.
10119 */
10120 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10121}
10122
10123
10124/**
10125 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10126 * and only provide the instruction length.
10127 *
10128 * Unconditional VM-exit.
10129 */
10130HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10131{
10132 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10133
10134#ifdef VBOX_STRICT
10135 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10136 switch (pVmxTransient->uExitReason)
10137 {
10138 case VMX_EXIT_ENCLS:
10139 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10140 break;
10141
10142 case VMX_EXIT_VMFUNC:
10143 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10144 break;
10145 }
10146#endif
10147
10148 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10149 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10150}
10151
10152
10153/**
10154 * Nested-guest VM-exit handler for instructions that provide instruction length as
10155 * well as more information.
10156 *
10157 * Unconditional VM-exit.
10158 */
10159HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10160{
10161 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10162
10163#ifdef VBOX_STRICT
10164 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10165 switch (pVmxTransient->uExitReason)
10166 {
10167 case VMX_EXIT_GDTR_IDTR_ACCESS:
10168 case VMX_EXIT_LDTR_TR_ACCESS:
10169 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10170 break;
10171
10172 case VMX_EXIT_RDRAND:
10173 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10174 break;
10175
10176 case VMX_EXIT_RDSEED:
10177 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10178 break;
10179
10180 case VMX_EXIT_XSAVES:
10181 case VMX_EXIT_XRSTORS:
10182 /** @todo NSTVMX: Verify XSS-bitmap. */
10183 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10184 break;
10185
10186 case VMX_EXIT_UMWAIT:
10187 case VMX_EXIT_TPAUSE:
10188 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10189 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10190 break;
10191
10192 case VMX_EXIT_LOADIWKEY:
10193 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10194 break;
10195 }
10196#endif
10197
10198 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10199 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10200 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10201
10202 VMXVEXITINFO ExitInfo;
10203 RT_ZERO(ExitInfo);
10204 ExitInfo.uReason = pVmxTransient->uExitReason;
10205 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10206 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10207 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10208 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10209}
10210
10211/** @} */
10212#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10213
10214
10215/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10216 * probes.
10217 *
10218 * The following few functions and associated structure contain the bloat
10219 * necessary for providing detailed debug events and dtrace probes as well as
10220 * reliable host side single stepping. This works on the principle of
10221 * "subclassing" the normal execution loop and workers. We replace the loop
10222 * method completely and override selected helpers to add necessary adjustments
10223 * to their core operation.
10224 *
10225 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10226 * any performance for debug and analysis features.
10227 *
10228 * @{
10229 */
10230
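/*
 * A rough sketch of how the helpers below fit together, assuming a caller wired
 * up along these lines (the actual debug run loop lives elsewhere in this file
 * and is more involved):
 *
 *     VMXRUNDBGSTATE DbgState;
 *     VBOXSTRICTRC   rcStrict = VINF_SUCCESS;
 *     vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);
 *     vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);    // DBGF/DTrace -> wanted exits
 *     for (;;)
 *     {
 *         vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState); // push the extras into the VMCS
 *         ... run the guest and read back the exit reason ...
 *         rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState);
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *         ... re-run vmxHCPreRunGuestDebugStateUpdate if the DTrace settings changed ...
 *     }
 *     return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
 */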
10231/**
10232 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10233 * in the debug run loop.
10234 */
10235typedef struct VMXRUNDBGSTATE
10236{
10237 /** The RIP we started executing at. This is for detecting that we stepped. */
10238 uint64_t uRipStart;
10239 /** The CS we started executing with. */
10240 uint16_t uCsStart;
10241
10242 /** Whether we've actually modified the 1st execution control field. */
10243 bool fModifiedProcCtls : 1;
10244 /** Whether we've actually modified the 2nd execution control field. */
10245 bool fModifiedProcCtls2 : 1;
10246 /** Whether we've actually modified the exception bitmap. */
10247 bool fModifiedXcptBitmap : 1;
10248
10249 /** We want the CR0 mask to be cleared. */
10250 bool fClearCr0Mask : 1;
10251 /** We want the CR4 mask to be cleared. */
10252 bool fClearCr4Mask : 1;
10253 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10254 uint32_t fCpe1Extra;
10255 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10256 uint32_t fCpe1Unwanted;
10257 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10258 uint32_t fCpe2Extra;
10259 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10260 uint32_t bmXcptExtra;
10261 /** The sequence number of the Dtrace provider settings the state was
10262 * configured against. */
10263 uint32_t uDtraceSettingsSeqNo;
10264 /** VM-exits to check (one bit per VM-exit). */
10265 uint32_t bmExitsToCheck[3];
10266
10267 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10268 uint32_t fProcCtlsInitial;
10269 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10270 uint32_t fProcCtls2Initial;
10271 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10272 uint32_t bmXcptInitial;
10273} VMXRUNDBGSTATE;
10274AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
10275typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10276
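/*
 * Sizing note for the compile-time assertion above: bmExitsToCheck holds three
 * uint32_t's, i.e. 3 * 32 = 96 bits (12 bytes).  The expression
 * (VMX_EXIT_MAX + 1 + 31) / 32 * 4 rounds the number of VM-exit reasons up to
 * whole 32-bit words and converts that to bytes, so the assertion verifies that
 * the array covers exactly the rounded-up number of exit reasons.
 */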
10277
10278/**
10279 * Initializes the VMXRUNDBGSTATE structure.
10280 *
10281 * @param pVCpu The cross context virtual CPU structure of the
10282 * calling EMT.
10283 * @param pVmxTransient The VMX-transient structure.
10284 * @param pDbgState The debug state to initialize.
10285 */
10286static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10287{
10288 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10289 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10290
10291 pDbgState->fModifiedProcCtls = false;
10292 pDbgState->fModifiedProcCtls2 = false;
10293 pDbgState->fModifiedXcptBitmap = false;
10294 pDbgState->fClearCr0Mask = false;
10295 pDbgState->fClearCr4Mask = false;
10296 pDbgState->fCpe1Extra = 0;
10297 pDbgState->fCpe1Unwanted = 0;
10298 pDbgState->fCpe2Extra = 0;
10299 pDbgState->bmXcptExtra = 0;
10300 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10301 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10302 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10303}
10304
10305
10306/**
10307 * Updates the VMCS fields with changes requested by @a pDbgState.
10308 *
10309 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10310 * immediately before executing guest code, i.e. when interrupts are disabled.
10311 * We don't check status codes here as we cannot easily assert or return in the
10312 * latter case.
10313 *
10314 * @param pVCpu The cross context virtual CPU structure.
10315 * @param pVmxTransient The VMX-transient structure.
10316 * @param pDbgState The debug state.
10317 */
10318static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10319{
10320 /*
10321 * Ensure desired flags in VMCS control fields are set.
10322 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10323 *
10324 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10325 * there should be no stale data in pCtx at this point.
10326 */
10327 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10328 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10329 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10330 {
10331 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10332 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10333 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10334 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10335 pDbgState->fModifiedProcCtls = true;
10336 }
10337
10338 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10339 {
10340 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10341 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10342 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10343 pDbgState->fModifiedProcCtls2 = true;
10344 }
10345
10346 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10347 {
10348 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10349 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10350 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10351 pDbgState->fModifiedXcptBitmap = true;
10352 }
10353
10354 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10355 {
10356 pVmcsInfo->u64Cr0Mask = 0;
10357 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10358 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10359 }
10360
10361 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10362 {
10363 pVmcsInfo->u64Cr4Mask = 0;
10364 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10365 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10366 }
10367
10368 NOREF(pVCpu);
10369}
10370
10371
10372/**
10373 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10374 * re-entry next time around.
10375 *
10376 * @returns Strict VBox status code (i.e. informational status codes too).
10377 * @param pVCpu The cross context virtual CPU structure.
10378 * @param pVmxTransient The VMX-transient structure.
10379 * @param pDbgState The debug state.
10380 * @param rcStrict The return code from executing the guest using single
10381 * stepping.
10382 */
10383static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10384 VBOXSTRICTRC rcStrict)
10385{
10386 /*
10387 * Restore VM-exit control settings as we may not reenter this function the
10388 * next time around.
10389 */
10390 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10391
10392 /* We reload the initial value and trigger what recalculations we can the
10393 next time around. From the looks of things, that's all that's required atm. */
10394 if (pDbgState->fModifiedProcCtls)
10395 {
10396 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10397 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10398 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10399 AssertRC(rc2);
10400 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10401 }
10402
10403 /* We're currently the only ones messing with this one, so just restore the
10404 cached value and reload the field. */
10405 if ( pDbgState->fModifiedProcCtls2
10406 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10407 {
10408 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10409 AssertRC(rc2);
10410 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10411 }
10412
10413 /* If we've modified the exception bitmap, we restore it and trigger
10414 reloading and partial recalculation the next time around. */
10415 if (pDbgState->fModifiedXcptBitmap)
10416 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10417
10418 return rcStrict;
10419}
10420
10421
10422/**
10423 * Configures VM-exit controls for current DBGF and DTrace settings.
10424 *
10425 * This updates @a pDbgState and the VMCS execution control fields to reflect
10426 * the necessary VM-exits demanded by DBGF and DTrace.
10427 *
10428 * @param pVCpu The cross context virtual CPU structure.
10429 * @param pVmxTransient The VMX-transient structure. May update
10430 * fUpdatedTscOffsettingAndPreemptTimer.
10431 * @param pDbgState The debug state.
10432 */
10433static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10434{
10435#ifndef IN_NEM_DARWIN
10436 /*
10437 * Record the DTrace settings sequence number so we can spot changes.
10438 */
10439 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10440 ASMCompilerBarrier();
10441#endif
10442
10443 /*
10444 * We'll rebuild most of the middle block of data members (holding the
10445 * current settings) as we go along here, so start by clearing it all.
10446 */
10447 pDbgState->bmXcptExtra = 0;
10448 pDbgState->fCpe1Extra = 0;
10449 pDbgState->fCpe1Unwanted = 0;
10450 pDbgState->fCpe2Extra = 0;
10451 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10452 pDbgState->bmExitsToCheck[i] = 0;
10453
10454 /*
10455 * Software interrupts (INT XXh) - no idea how to trigger these...
10456 */
10457 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10458 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10459 || VBOXVMM_INT_SOFTWARE_ENABLED())
10460 {
10461 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10462 }
10463
10464 /*
10465 * INT3 breakpoints - triggered by #BP exceptions.
10466 */
10467 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10468 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10469
10470 /*
10471 * Exception bitmap and XCPT events+probes.
10472 */
10473 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10474 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10475 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10476
10477 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10478 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10479 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10480 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10481 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10482 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10483 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10484 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10485 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10486 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10487 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10488 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10489 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10490 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10491 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10492 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10493 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10494 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10495
10496 if (pDbgState->bmXcptExtra)
10497 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10498
10499 /*
10500 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10501 *
10502 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10503 * So, when adding/changing/removing please don't forget to update it.
10504 *
10505 * Some of the macros are picking up local variables to save horizontal space
10506 * (being able to see it in a table is the lesser evil here).
10507 */
10508#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10509 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10510 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10511#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10512 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10513 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10514 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10515 } else do { } while (0)
10516#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10517 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10518 { \
10519 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10520 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10521 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10522 } else do { } while (0)
10523#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10524 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10525 { \
10526 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10527 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10528 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10529 } else do { } while (0)
10530#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10531 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10532 { \
10533 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10534 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10535 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10536 } else do { } while (0)
10537
10538 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10539 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10540 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10541 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10542 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10543
10544 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10545 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10546 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10547 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10548 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10549 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10550 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10551 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10552 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10553 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10554 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10555 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10556 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10557 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10558 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10559 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10560 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10561 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10562 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10563 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10564 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10565 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10566 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10567 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10568 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10569 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10570 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10571 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10572 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10573 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10574 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10575 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10576 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10577 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10578 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10579 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10580
10581 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10582 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10583 {
10584 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10585 | CPUMCTX_EXTRN_APIC_TPR);
10586 AssertRC(rc);
10587
10588#if 0 /** @todo fix me */
10589 pDbgState->fClearCr0Mask = true;
10590 pDbgState->fClearCr4Mask = true;
10591#endif
10592 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10593 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10594 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10595 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10596 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10597 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10598 require clearing here and in the loop if we start using it. */
10599 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10600 }
10601 else
10602 {
10603 if (pDbgState->fClearCr0Mask)
10604 {
10605 pDbgState->fClearCr0Mask = false;
10606 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10607 }
10608 if (pDbgState->fClearCr4Mask)
10609 {
10610 pDbgState->fClearCr4Mask = false;
10611 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10612 }
10613 }
10614 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10615 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10616
10617 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10618 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10619 {
10620 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10621 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10622 }
10623 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10624 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10625
10626 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10627 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10628 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10629 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10630 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10631 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10632 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10633 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10634#if 0 /** @todo too slow, fix handler. */
10635 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10636#endif
10637 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10638
10639 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10640 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10641 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10642 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10643 {
10644 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10645 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10646 }
10647 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10648 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10649 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10650 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10651
10652 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10653 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10654 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10655 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10656 {
10657 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10658 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10659 }
10660 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10661 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10662 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10663 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10664
10665 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10666 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10667 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10668 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10669 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10670 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10671 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10672 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10673 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10674 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10675 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10676 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10677 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10678 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10679 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10680 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10681 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10682 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10683 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10684 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
10685 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10686 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10687
10688#undef IS_EITHER_ENABLED
10689#undef SET_ONLY_XBM_IF_EITHER_EN
10690#undef SET_CPE1_XBM_IF_EITHER_EN
10691#undef SET_CPEU_XBM_IF_EITHER_EN
10692#undef SET_CPE2_XBM_IF_EITHER_EN
10693
10694 /*
10695 * Sanitize the control stuff.
10696 */
10697 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10698 if (pDbgState->fCpe2Extra)
10699 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10700 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10701 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10702#ifndef IN_NEM_DARWIN /** @todo */
10703 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10704 {
10705 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10706 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10707 }
10708#endif
10709
10710 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10711 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10712 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10713 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10714}
10715
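/*
 * For readability of the table above, here is roughly what one invocation
 * expands to (omitting the AssertCompile and the trailing else).  For example,
 * SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
 * becomes:
 *
 *     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
 *         || VBOXVMM_INSTR_HALT_ENABLED())
 *     {
 *         pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;    // force the HLT intercept
 *         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT); // and check HLT VM-exits
 *     }
 */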
10716
10717/**
10718 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10719 * appropriate.
10720 *
10721 * The caller has checked the VM-exit against the
10722 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
10723 * already, so we don't have to do that either.
10724 *
10725 * @returns Strict VBox status code (i.e. informational status codes too).
10726 * @param pVCpu The cross context virtual CPU structure.
10727 * @param pVmxTransient The VMX-transient structure.
10728 * @param uExitReason The VM-exit reason.
10729 *
10730 * @remarks The name of this function is displayed by dtrace, so keep it short
10731 * and to the point. No longer than 33 chars long, please.
10732 */
10733static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10734{
10735 /*
10736 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10737 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10738 *
10739 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10740 * does. Must add/change/remove both places. Same ordering, please.
10741 *
10742 * Added/removed events must also be reflected in the next section
10743 * where we dispatch dtrace events.
10744 */
10745 bool fDtrace1 = false;
10746 bool fDtrace2 = false;
10747 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10748 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10749 uint32_t uEventArg = 0;
10750#define SET_EXIT(a_EventSubName) \
10751 do { \
10752 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10753 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10754 } while (0)
10755#define SET_BOTH(a_EventSubName) \
10756 do { \
10757 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10758 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10759 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10760 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10761 } while (0)
10762 switch (uExitReason)
10763 {
10764 case VMX_EXIT_MTF:
10765 return vmxHCExitMtf(pVCpu, pVmxTransient);
10766
10767 case VMX_EXIT_XCPT_OR_NMI:
10768 {
10769 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10770 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10771 {
10772 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10773 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10774 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10775 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10776 {
10777 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10778 {
10779 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10780 uEventArg = pVmxTransient->uExitIntErrorCode;
10781 }
10782 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10783 switch (enmEvent1)
10784 {
10785 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10786 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10787 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10788 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10789 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10790 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10791 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10792 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10793 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10794 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10795 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10796 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10797 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10798 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10799 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10800 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10801 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10802 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10803 default: break;
10804 }
10805 }
10806 else
10807 AssertFailed();
10808 break;
10809
10810 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10811 uEventArg = idxVector;
10812 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10813 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10814 break;
10815 }
10816 break;
10817 }
10818
10819 case VMX_EXIT_TRIPLE_FAULT:
10820 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10821 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10822 break;
10823 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10824 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10825 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10826 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10827 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10828
10829 /* Instruction specific VM-exits: */
10830 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10831 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10832 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10833 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
10834 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
10835 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
10836 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
10837 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
10838 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
10839 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
10840 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
10841 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
10842 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
10843 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
10844 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
10845 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
10846 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
10847 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
10848 case VMX_EXIT_MOV_CRX:
10849 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10850 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
10851 SET_BOTH(CRX_READ);
10852 else
10853 SET_BOTH(CRX_WRITE);
10854 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10855 break;
10856 case VMX_EXIT_MOV_DRX:
10857 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10858 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
10859 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
10860 SET_BOTH(DRX_READ);
10861 else
10862 SET_BOTH(DRX_WRITE);
10863 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
10864 break;
10865 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
10866 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
10867 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
10868 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
10869 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
10870 case VMX_EXIT_GDTR_IDTR_ACCESS:
10871 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10872 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
10873 {
10874 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
10875 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
10876 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
10877 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
10878 }
10879 break;
10880
10881 case VMX_EXIT_LDTR_TR_ACCESS:
10882 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10883 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
10884 {
10885 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
10886 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
10887 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
10888 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
10889 }
10890 break;
10891
10892 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
10893 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
10894 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
10895 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
10896 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
10897 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
10898 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
10899 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
10900 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
10901 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
10902 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
10903
10904 /* Events that aren't relevant at this point. */
10905 case VMX_EXIT_EXT_INT:
10906 case VMX_EXIT_INT_WINDOW:
10907 case VMX_EXIT_NMI_WINDOW:
10908 case VMX_EXIT_TPR_BELOW_THRESHOLD:
10909 case VMX_EXIT_PREEMPT_TIMER:
10910 case VMX_EXIT_IO_INSTR:
10911 break;
10912
10913 /* Errors and unexpected events. */
10914 case VMX_EXIT_INIT_SIGNAL:
10915 case VMX_EXIT_SIPI:
10916 case VMX_EXIT_IO_SMI:
10917 case VMX_EXIT_SMI:
10918 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
10919 case VMX_EXIT_ERR_MSR_LOAD:
10920 case VMX_EXIT_ERR_MACHINE_CHECK:
10921 case VMX_EXIT_PML_FULL:
10922 case VMX_EXIT_VIRTUALIZED_EOI:
10923 break;
10924
10925 default:
10926 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
10927 break;
10928 }
10929#undef SET_BOTH
10930#undef SET_EXIT
10931
10932 /*
10933 * Dtrace tracepoints go first. We do them here at once so we don't
10934 * have to copy the guest state saving and stuff a few dozen times.
10935 * The downside is that we've got to repeat the switch, though this time
10936 * we use enmEvent since the probes are a subset of what DBGF does.
10937 */
10938 if (fDtrace1 || fDtrace2)
10939 {
10940 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10941 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10942 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10943 switch (enmEvent1)
10944 {
10945 /** @todo consider which extra parameters would be helpful for each probe. */
10946 case DBGFEVENT_END: break;
10947 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
10948 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
10949 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
10950 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
10951 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
10952 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
10953 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
10954 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
10955 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
10956 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
10957 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
10958 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
10959 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
10960 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
10961 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
10962 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
10963 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
10964 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
10965 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
10966 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
10967 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
10968 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
10969 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
10970 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
10971 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
10972 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
10973 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
10974 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
10975 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
10976 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
10977 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
10978 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
10979 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
10980 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
10981 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
10982 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
10983 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
10984 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
10985 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
10986 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
10987 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
10988 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
10989 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
10990 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
10991 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
10992 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
10993 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
10994 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
10995 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
10996 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
10997 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
10998 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
10999 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11000 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11001 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11002 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11003 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11004 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11005 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11006 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11007 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11008 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11009 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11010 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11011 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11012 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11013 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11014 }
11015 switch (enmEvent2)
11016 {
11017 /** @todo consider which extra parameters would be helpful for each probe. */
11018 case DBGFEVENT_END: break;
11019 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11020 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11021 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11022 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11023 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11024 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11025 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11026 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11027 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11028 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11029 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11030 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11031 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11032 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11033 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11034 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11035 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11036 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11037 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11038 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11039 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11040 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11041 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11042 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11043 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11044 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11045 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11046 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11047 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11048 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11049 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11050 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11051 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11052 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11053 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11054 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11055 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11056 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11057 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11058 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11059 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11060 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11061 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11062 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11063 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11064 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11065 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11066 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11067 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11068 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11069 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11070 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11071 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11072 }
11073 }
11074
11075 /*
11076 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11077 * the DBGF call will do a full check).
11078 *
11079 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11080 * Note! If we have two events, we prioritize the first, i.e. the instruction
11081 * one, in order to avoid event nesting.
11082 */
11083 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11084 if ( enmEvent1 != DBGFEVENT_END
11085 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11086 {
11087 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11088 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11089 if (rcStrict != VINF_SUCCESS)
11090 return rcStrict;
11091 }
11092 else if ( enmEvent2 != DBGFEVENT_END
11093 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11094 {
11095 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11096 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11097 if (rcStrict != VINF_SUCCESS)
11098 return rcStrict;
11099 }
11100
11101 return VINF_SUCCESS;
11102}
11103
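/*
 * For reference, the SET_BOTH() shorthand in the switch above resolves to
 * assignments of both the instruction-level and the exit-level event/probe.
 * SET_BOTH(CPUID), for instance, is equivalent to:
 *
 *     enmEvent1 = DBGFEVENT_INSTR_CPUID;
 *     enmEvent2 = DBGFEVENT_EXIT_CPUID;
 *     fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
 *     fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
 */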
11104
11105/**
11106 * Single-stepping VM-exit filtering.
11107 *
11108 * This is preprocessing the VM-exits and deciding whether we've gotten far
11109 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11110 * handling is performed.
11111 *
11112 * @returns Strict VBox status code (i.e. informational status codes too).
11113 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11114 * @param pVmxTransient The VMX-transient structure.
11115 * @param pDbgState The debug state.
11116 */
11117DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11118{
11119 /*
11120 * Expensive (saves context) generic dtrace VM-exit probe.
11121 */
11122 uint32_t const uExitReason = pVmxTransient->uExitReason;
11123 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11124 { /* more likely */ }
11125 else
11126 {
11127 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11128 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11129 AssertRC(rc);
11130 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11131 }
11132
11133#ifndef IN_NEM_DARWIN
11134 /*
11135 * Check for host NMI, just to get that out of the way.
11136 */
11137 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11138 { /* normally likely */ }
11139 else
11140 {
11141 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11142 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11143 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11144 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11145 }
11146#endif
11147
11148 /*
11149 * Check for single stepping event if we're stepping.
11150 */
11151 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11152 {
11153 switch (uExitReason)
11154 {
11155 case VMX_EXIT_MTF:
11156 return vmxHCExitMtf(pVCpu, pVmxTransient);
11157
11158 /* Various events: */
11159 case VMX_EXIT_XCPT_OR_NMI:
11160 case VMX_EXIT_EXT_INT:
11161 case VMX_EXIT_TRIPLE_FAULT:
11162 case VMX_EXIT_INT_WINDOW:
11163 case VMX_EXIT_NMI_WINDOW:
11164 case VMX_EXIT_TASK_SWITCH:
11165 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11166 case VMX_EXIT_APIC_ACCESS:
11167 case VMX_EXIT_EPT_VIOLATION:
11168 case VMX_EXIT_EPT_MISCONFIG:
11169 case VMX_EXIT_PREEMPT_TIMER:
11170
11171 /* Instruction specific VM-exits: */
11172 case VMX_EXIT_CPUID:
11173 case VMX_EXIT_GETSEC:
11174 case VMX_EXIT_HLT:
11175 case VMX_EXIT_INVD:
11176 case VMX_EXIT_INVLPG:
11177 case VMX_EXIT_RDPMC:
11178 case VMX_EXIT_RDTSC:
11179 case VMX_EXIT_RSM:
11180 case VMX_EXIT_VMCALL:
11181 case VMX_EXIT_VMCLEAR:
11182 case VMX_EXIT_VMLAUNCH:
11183 case VMX_EXIT_VMPTRLD:
11184 case VMX_EXIT_VMPTRST:
11185 case VMX_EXIT_VMREAD:
11186 case VMX_EXIT_VMRESUME:
11187 case VMX_EXIT_VMWRITE:
11188 case VMX_EXIT_VMXOFF:
11189 case VMX_EXIT_VMXON:
11190 case VMX_EXIT_MOV_CRX:
11191 case VMX_EXIT_MOV_DRX:
11192 case VMX_EXIT_IO_INSTR:
11193 case VMX_EXIT_RDMSR:
11194 case VMX_EXIT_WRMSR:
11195 case VMX_EXIT_MWAIT:
11196 case VMX_EXIT_MONITOR:
11197 case VMX_EXIT_PAUSE:
11198 case VMX_EXIT_GDTR_IDTR_ACCESS:
11199 case VMX_EXIT_LDTR_TR_ACCESS:
11200 case VMX_EXIT_INVEPT:
11201 case VMX_EXIT_RDTSCP:
11202 case VMX_EXIT_INVVPID:
11203 case VMX_EXIT_WBINVD:
11204 case VMX_EXIT_XSETBV:
11205 case VMX_EXIT_RDRAND:
11206 case VMX_EXIT_INVPCID:
11207 case VMX_EXIT_VMFUNC:
11208 case VMX_EXIT_RDSEED:
11209 case VMX_EXIT_XSAVES:
11210 case VMX_EXIT_XRSTORS:
11211 {
11212 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11213 AssertRCReturn(rc, rc);
11214 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11215 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11216 return VINF_EM_DBG_STEPPED;
11217 break;
11218 }
11219
11220 /* Errors and unexpected events: */
11221 case VMX_EXIT_INIT_SIGNAL:
11222 case VMX_EXIT_SIPI:
11223 case VMX_EXIT_IO_SMI:
11224 case VMX_EXIT_SMI:
11225 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11226 case VMX_EXIT_ERR_MSR_LOAD:
11227 case VMX_EXIT_ERR_MACHINE_CHECK:
11228 case VMX_EXIT_PML_FULL:
11229 case VMX_EXIT_VIRTUALIZED_EOI:
11230 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
11231 break;
11232
11233 default:
11234 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11235 break;
11236 }
11237 }
11238
11239 /*
11240 * Check for debugger event breakpoints and dtrace probes.
11241 */
11242 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11243 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11244 {
11245 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11246 if (rcStrict != VINF_SUCCESS)
11247 return rcStrict;
11248 }
11249
11250 /*
11251 * Normal processing.
11252 */
11253#ifdef HMVMX_USE_FUNCTION_TABLE
11254 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11255#else
11256 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11257#endif
11258}
11259
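/*
 * To summarize the filtering order above: the expensive generic DTrace VM-exit
 * probe fires first, host NMIs are then handled (when not building the darwin
 * NEM backend), the single-stepping check compares the current RIP/CS against
 * the values captured by vmxHCRunDebugStateInit and returns VINF_EM_DBG_STEPPED
 * once they differ, DBGF events and DTrace probes are dispatched via
 * vmxHCHandleExitDtraceEvents for exits flagged in bmExitsToCheck, and only then
 * does normal VM-exit handling run.
 */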
11260/** @} */