VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@ 93922

Last change on this file since 93922 was 93922, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 EPT VM-exit handling with HM ring-0 code.

1/* $Id: VMXAllTemplate.cpp.h 93922 2022-02-24 15:14:31Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/**
42 * Flags to skip redundant reads of some common VMCS fields that are not part of
43 * the guest-CPU or VCPU state but are needed while handling VM-exits.
44 */
45#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
46#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
47#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
48#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
49#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
50#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
51#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
52#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
53#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
54#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
55
56/** All the VMCS fields required for processing of exception/NMI VM-exits. */
57#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
58 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
59 | HMVMX_READ_EXIT_INSTR_LEN \
60 | HMVMX_READ_IDT_VECTORING_INFO \
61 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
62
63/** Assert that all the given fields have been read from the VMCS. */
64#ifdef VBOX_STRICT
65# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
66 do { \
67 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
68 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
69 } while (0)
70#else
71# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
72#endif
73
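/*
 * Editor's note: the following is an illustrative sketch, not part of the original
 * sources.  It shows how a VM-exit handler typically pairs the HMVMX_READ_XXX flags
 * with the corresponding vmxHCReadXxxVmcs helpers defined further down, and then
 * uses HMVMX_ASSERT_READ to verify the fields are cached before use.  The handler
 * name and body are hypothetical.
 */
#if 0 /* usage sketch only */
static DECLCALLBACK(VBOXSTRICTRC) vmxHCExitSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Cache the VMCS fields this handler needs; redundant reads are skipped. */
    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);

    /* In strict builds this asserts that both flags are set in fVmcsFieldsRead. */
    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);

    uint64_t const uExitQual = pVmxTransient->uExitQual;
    RT_NOREF(pVCpu, uExitQual);
    return VINF_SUCCESS;
}
#endif
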
74/**
75 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
76 * guest using hardware-assisted VMX.
77 *
78 * This excludes state like GPRs (other than RSP) which are always swapped
79 * and restored across the world-switch, and also MSRs like EFER which cannot
80 * be modified by the guest without causing a VM-exit.
81 */
82#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
83 | CPUMCTX_EXTRN_RFLAGS \
84 | CPUMCTX_EXTRN_RSP \
85 | CPUMCTX_EXTRN_SREG_MASK \
86 | CPUMCTX_EXTRN_TABLE_MASK \
87 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
88 | CPUMCTX_EXTRN_SYSCALL_MSRS \
89 | CPUMCTX_EXTRN_SYSENTER_MSRS \
90 | CPUMCTX_EXTRN_TSC_AUX \
91 | CPUMCTX_EXTRN_OTHER_MSRS \
92 | CPUMCTX_EXTRN_CR0 \
93 | CPUMCTX_EXTRN_CR3 \
94 | CPUMCTX_EXTRN_CR4 \
95 | CPUMCTX_EXTRN_DR7 \
96 | CPUMCTX_EXTRN_HWVIRT \
97 | CPUMCTX_EXTRN_INHIBIT_INT \
98 | CPUMCTX_EXTRN_INHIBIT_NMI)
99
100/**
101 * Exception bitmap mask for real-mode guests (real-on-v86).
102 *
103 * We need to intercept all exceptions manually except:
104 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
105 * due to bugs in Intel CPUs.
106 * - \#PF need not be intercepted even in real-mode if we have nested paging
107 * support.
108 */
109#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
110 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
111 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
112 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
113 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
114 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
115 | RT_BIT(X86_XCPT_XF))
116
117/** Maximum VM-instruction error number. */
118#define HMVMX_INSTR_ERROR_MAX 28
119
120/** Profiling macro. */
121#ifdef HM_PROFILE_EXIT_DISPATCH
122# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
123# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
124#else
125# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
126# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
127#endif
128
129#ifndef IN_NEM_DARWIN
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
140#else
141# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
142# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
143#endif
144
145/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
146 * context. */
147#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
148 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
149 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
150
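/*
 * Editor's note: illustrative sketch, not part of the original sources.  It shows
 * the intended use of HMVMX_CPUMCTX_ASSERT: assert that a piece of guest state has
 * been imported (i.e. its CPUMCTX_EXTRN_XXX bit is clear in fExtrn) before reading
 * it from the guest-CPU context.  The function name is hypothetical.
 */
#if 0 /* usage sketch only */
static void vmxSketchUseGuestCr0(PVMCPUCC pVCpu)
{
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
    RT_NOREF(uGuestCr0);
}
#endif
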
151/** Log the VM-exit reason with an easily visible marker to identify it in a
152 * potential sea of logging data. */
153#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
154 do { \
155 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
156 HMGetVmxExitName(a_uExitReason))); \
157 } while (0) \
158
159
160/*********************************************************************************************************************************
161* Structures and Typedefs *
162*********************************************************************************************************************************/
163/**
164 * Memory operand read or write access.
165 */
166typedef enum VMXMEMACCESS
167{
168 VMXMEMACCESS_READ = 0,
169 VMXMEMACCESS_WRITE = 1
170} VMXMEMACCESS;
171
172
173/**
174 * VMX VM-exit handler.
175 *
176 * @returns Strict VBox status code (i.e. informational status codes too).
177 * @param pVCpu The cross context virtual CPU structure.
178 * @param pVmxTransient The VMX-transient structure.
179 */
180#ifndef HMVMX_USE_FUNCTION_TABLE
181typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
182#else
183typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
184/** Pointer to VM-exit handler. */
185typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
186#endif
187
188/**
189 * VMX VM-exit handler, non-strict status code.
190 *
191 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
192 *
193 * @returns VBox status code, no informational status code returned.
194 * @param pVCpu The cross context virtual CPU structure.
195 * @param pVmxTransient The VMX-transient structure.
196 *
197 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
198 * use of that status code will be replaced with VINF_EM_SOMETHING
199 * later when switching over to IEM.
200 */
201#ifndef HMVMX_USE_FUNCTION_TABLE
202typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203#else
204typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
205#endif
206
207
208/*********************************************************************************************************************************
209* Internal Functions *
210*********************************************************************************************************************************/
211#ifndef HMVMX_USE_FUNCTION_TABLE
212DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
213# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
214# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
215#else
216# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
217# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
218#endif
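
/*
 * Editor's note: illustrative sketch, not part of the original sources.  It shows
 * how HMVMX_EXIT_DECL is used when defining one of the VM-exit handlers declared
 * below, so the same handler body works both with and without
 * HMVMX_USE_FUNCTION_TABLE.  The handler name is hypothetical.
 */
#if 0 /* usage sketch only */
HMVMX_EXIT_DECL vmxHCExitSketchHandler(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    RT_NOREF(pVCpu, pVmxTransient);
    return VINF_SUCCESS;
}
#endif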
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
221#endif
222
223static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
224
225/** @name VM-exit handler prototypes.
226 * @{
227 */
228static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
229static FNVMXEXITHANDLER vmxHCExitExtInt;
230static FNVMXEXITHANDLER vmxHCExitTripleFault;
231static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
232static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
233static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
234static FNVMXEXITHANDLER vmxHCExitCpuid;
235static FNVMXEXITHANDLER vmxHCExitGetsec;
236static FNVMXEXITHANDLER vmxHCExitHlt;
237static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
238static FNVMXEXITHANDLER vmxHCExitInvlpg;
239static FNVMXEXITHANDLER vmxHCExitRdpmc;
240static FNVMXEXITHANDLER vmxHCExitVmcall;
241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
242static FNVMXEXITHANDLER vmxHCExitVmclear;
243static FNVMXEXITHANDLER vmxHCExitVmlaunch;
244static FNVMXEXITHANDLER vmxHCExitVmptrld;
245static FNVMXEXITHANDLER vmxHCExitVmptrst;
246static FNVMXEXITHANDLER vmxHCExitVmread;
247static FNVMXEXITHANDLER vmxHCExitVmresume;
248static FNVMXEXITHANDLER vmxHCExitVmwrite;
249static FNVMXEXITHANDLER vmxHCExitVmxoff;
250static FNVMXEXITHANDLER vmxHCExitVmxon;
251static FNVMXEXITHANDLER vmxHCExitInvvpid;
252# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
253static FNVMXEXITHANDLER vmxHCExitInvept;
254# endif
255#endif
256static FNVMXEXITHANDLER vmxHCExitRdtsc;
257static FNVMXEXITHANDLER vmxHCExitMovCRx;
258static FNVMXEXITHANDLER vmxHCExitMovDRx;
259static FNVMXEXITHANDLER vmxHCExitIoInstr;
260static FNVMXEXITHANDLER vmxHCExitRdmsr;
261static FNVMXEXITHANDLER vmxHCExitWrmsr;
262static FNVMXEXITHANDLER vmxHCExitMwait;
263static FNVMXEXITHANDLER vmxHCExitMtf;
264static FNVMXEXITHANDLER vmxHCExitMonitor;
265static FNVMXEXITHANDLER vmxHCExitPause;
266static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
267static FNVMXEXITHANDLER vmxHCExitApicAccess;
268static FNVMXEXITHANDLER vmxHCExitEptViolation;
269static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
270static FNVMXEXITHANDLER vmxHCExitRdtscp;
271static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
272static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
273static FNVMXEXITHANDLER vmxHCExitXsetbv;
274static FNVMXEXITHANDLER vmxHCExitInvpcid;
275static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
276static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
277static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
278/** @} */
279
280#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
281/** @name Nested-guest VM-exit handler prototypes.
282 * @{
283 */
284static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
285static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
286static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
287static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
288static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
289static FNVMXEXITHANDLER vmxHCExitHltNested;
290static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
291static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
292static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
293static FNVMXEXITHANDLER vmxHCExitRdtscNested;
294static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
295static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
296static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
297static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
298static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
299static FNVMXEXITHANDLER vmxHCExitMwaitNested;
300static FNVMXEXITHANDLER vmxHCExitMtfNested;
301static FNVMXEXITHANDLER vmxHCExitMonitorNested;
302static FNVMXEXITHANDLER vmxHCExitPauseNested;
303static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
304static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
305static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
306static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
307static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
308static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
309static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
310static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
311static FNVMXEXITHANDLER vmxHCExitInstrNested;
312static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
313# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
314static FNVMXEXITHANDLER vmxHCExitEptViolationNested;
315static FNVMXEXITHANDLER vmxHCExitEptMisconfigNested;
316# endif
317/** @} */
318#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
319
320
321/*********************************************************************************************************************************
322* Global Variables *
323*********************************************************************************************************************************/
324#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
325/**
326 * Array of all VMCS fields.
327 * Any fields added to the VT-x spec. should be added here.
328 *
329 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
330 * of nested-guests.
331 */
332static const uint32_t g_aVmcsFields[] =
333{
334 /* 16-bit control fields. */
335 VMX_VMCS16_VPID,
336 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
337 VMX_VMCS16_EPTP_INDEX,
338
339 /* 16-bit guest-state fields. */
340 VMX_VMCS16_GUEST_ES_SEL,
341 VMX_VMCS16_GUEST_CS_SEL,
342 VMX_VMCS16_GUEST_SS_SEL,
343 VMX_VMCS16_GUEST_DS_SEL,
344 VMX_VMCS16_GUEST_FS_SEL,
345 VMX_VMCS16_GUEST_GS_SEL,
346 VMX_VMCS16_GUEST_LDTR_SEL,
347 VMX_VMCS16_GUEST_TR_SEL,
348 VMX_VMCS16_GUEST_INTR_STATUS,
349 VMX_VMCS16_GUEST_PML_INDEX,
350
351 /* 16-bit host-state fields. */
352 VMX_VMCS16_HOST_ES_SEL,
353 VMX_VMCS16_HOST_CS_SEL,
354 VMX_VMCS16_HOST_SS_SEL,
355 VMX_VMCS16_HOST_DS_SEL,
356 VMX_VMCS16_HOST_FS_SEL,
357 VMX_VMCS16_HOST_GS_SEL,
358 VMX_VMCS16_HOST_TR_SEL,
359
360 /* 64-bit control fields. */
361 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
362 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
363 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
364 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
365 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
366 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
367 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
368 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
369 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
370 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
371 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
372 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
373 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
374 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
375 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
376 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
377 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
378 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
379 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
380 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
381 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
382 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
383 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
384 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
385 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
386 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
387 VMX_VMCS64_CTRL_EPTP_FULL,
388 VMX_VMCS64_CTRL_EPTP_HIGH,
389 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
390 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
391 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
392 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
393 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
394 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
395 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
396 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
397 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
398 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
399 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
400 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
401 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
402 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
403 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
404 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
405 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
406 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
407 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
408 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
409 VMX_VMCS64_CTRL_SPPTP_FULL,
410 VMX_VMCS64_CTRL_SPPTP_HIGH,
411 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
412 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
413 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
414 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
415 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
416 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
417
418 /* 64-bit read-only data fields. */
419 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
420 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
421
422 /* 64-bit guest-state fields. */
423 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
424 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
425 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
426 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
427 VMX_VMCS64_GUEST_PAT_FULL,
428 VMX_VMCS64_GUEST_PAT_HIGH,
429 VMX_VMCS64_GUEST_EFER_FULL,
430 VMX_VMCS64_GUEST_EFER_HIGH,
431 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
432 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
433 VMX_VMCS64_GUEST_PDPTE0_FULL,
434 VMX_VMCS64_GUEST_PDPTE0_HIGH,
435 VMX_VMCS64_GUEST_PDPTE1_FULL,
436 VMX_VMCS64_GUEST_PDPTE1_HIGH,
437 VMX_VMCS64_GUEST_PDPTE2_FULL,
438 VMX_VMCS64_GUEST_PDPTE2_HIGH,
439 VMX_VMCS64_GUEST_PDPTE3_FULL,
440 VMX_VMCS64_GUEST_PDPTE3_HIGH,
441 VMX_VMCS64_GUEST_BNDCFGS_FULL,
442 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
443 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
444 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
445 VMX_VMCS64_GUEST_PKRS_FULL,
446 VMX_VMCS64_GUEST_PKRS_HIGH,
447
448 /* 64-bit host-state fields. */
449 VMX_VMCS64_HOST_PAT_FULL,
450 VMX_VMCS64_HOST_PAT_HIGH,
451 VMX_VMCS64_HOST_EFER_FULL,
452 VMX_VMCS64_HOST_EFER_HIGH,
453 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
454 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
455 VMX_VMCS64_HOST_PKRS_FULL,
456 VMX_VMCS64_HOST_PKRS_HIGH,
457
458 /* 32-bit control fields. */
459 VMX_VMCS32_CTRL_PIN_EXEC,
460 VMX_VMCS32_CTRL_PROC_EXEC,
461 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
462 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
463 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
464 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
465 VMX_VMCS32_CTRL_EXIT,
466 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
467 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
468 VMX_VMCS32_CTRL_ENTRY,
469 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
470 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
471 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
472 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
473 VMX_VMCS32_CTRL_TPR_THRESHOLD,
474 VMX_VMCS32_CTRL_PROC_EXEC2,
475 VMX_VMCS32_CTRL_PLE_GAP,
476 VMX_VMCS32_CTRL_PLE_WINDOW,
477
478 /* 32-bit read-only data fields. */
479 VMX_VMCS32_RO_VM_INSTR_ERROR,
480 VMX_VMCS32_RO_EXIT_REASON,
481 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
482 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
483 VMX_VMCS32_RO_IDT_VECTORING_INFO,
484 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
485 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
486 VMX_VMCS32_RO_EXIT_INSTR_INFO,
487
488 /* 32-bit guest-state fields. */
489 VMX_VMCS32_GUEST_ES_LIMIT,
490 VMX_VMCS32_GUEST_CS_LIMIT,
491 VMX_VMCS32_GUEST_SS_LIMIT,
492 VMX_VMCS32_GUEST_DS_LIMIT,
493 VMX_VMCS32_GUEST_FS_LIMIT,
494 VMX_VMCS32_GUEST_GS_LIMIT,
495 VMX_VMCS32_GUEST_LDTR_LIMIT,
496 VMX_VMCS32_GUEST_TR_LIMIT,
497 VMX_VMCS32_GUEST_GDTR_LIMIT,
498 VMX_VMCS32_GUEST_IDTR_LIMIT,
499 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
501 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
502 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
503 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
504 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
505 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
506 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
507 VMX_VMCS32_GUEST_INT_STATE,
508 VMX_VMCS32_GUEST_ACTIVITY_STATE,
509 VMX_VMCS32_GUEST_SMBASE,
510 VMX_VMCS32_GUEST_SYSENTER_CS,
511 VMX_VMCS32_PREEMPT_TIMER_VALUE,
512
513 /* 32-bit host-state fields. */
514 VMX_VMCS32_HOST_SYSENTER_CS,
515
516 /* Natural-width control fields. */
517 VMX_VMCS_CTRL_CR0_MASK,
518 VMX_VMCS_CTRL_CR4_MASK,
519 VMX_VMCS_CTRL_CR0_READ_SHADOW,
520 VMX_VMCS_CTRL_CR4_READ_SHADOW,
521 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
522 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
523 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
524 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
525
526 /* Natural-width read-only data fields. */
527 VMX_VMCS_RO_EXIT_QUALIFICATION,
528 VMX_VMCS_RO_IO_RCX,
529 VMX_VMCS_RO_IO_RSI,
530 VMX_VMCS_RO_IO_RDI,
531 VMX_VMCS_RO_IO_RIP,
532 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
533
534 /* Natural-width guest-state fields. */
535 VMX_VMCS_GUEST_CR0,
536 VMX_VMCS_GUEST_CR3,
537 VMX_VMCS_GUEST_CR4,
538 VMX_VMCS_GUEST_ES_BASE,
539 VMX_VMCS_GUEST_CS_BASE,
540 VMX_VMCS_GUEST_SS_BASE,
541 VMX_VMCS_GUEST_DS_BASE,
542 VMX_VMCS_GUEST_FS_BASE,
543 VMX_VMCS_GUEST_GS_BASE,
544 VMX_VMCS_GUEST_LDTR_BASE,
545 VMX_VMCS_GUEST_TR_BASE,
546 VMX_VMCS_GUEST_GDTR_BASE,
547 VMX_VMCS_GUEST_IDTR_BASE,
548 VMX_VMCS_GUEST_DR7,
549 VMX_VMCS_GUEST_RSP,
550 VMX_VMCS_GUEST_RIP,
551 VMX_VMCS_GUEST_RFLAGS,
552 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
553 VMX_VMCS_GUEST_SYSENTER_ESP,
554 VMX_VMCS_GUEST_SYSENTER_EIP,
555 VMX_VMCS_GUEST_S_CET,
556 VMX_VMCS_GUEST_SSP,
557 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
558
559 /* Natural-width host-state fields */
560 VMX_VMCS_HOST_CR0,
561 VMX_VMCS_HOST_CR3,
562 VMX_VMCS_HOST_CR4,
563 VMX_VMCS_HOST_FS_BASE,
564 VMX_VMCS_HOST_GS_BASE,
565 VMX_VMCS_HOST_TR_BASE,
566 VMX_VMCS_HOST_GDTR_BASE,
567 VMX_VMCS_HOST_IDTR_BASE,
568 VMX_VMCS_HOST_SYSENTER_ESP,
569 VMX_VMCS_HOST_SYSENTER_EIP,
570 VMX_VMCS_HOST_RSP,
571 VMX_VMCS_HOST_RIP,
572 VMX_VMCS_HOST_S_CET,
573 VMX_VMCS_HOST_SSP,
574 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
575};
576#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
577
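/*
 * Editor's note: illustrative sketch, not part of the original sources.  When
 * VBOX_WITH_NESTED_HWVIRT_VMX is defined, g_aVmcsFields above is typically consumed
 * by walking the whole table and filtering on the encoding of each VMCS field ID;
 * the loop below only shows the enumeration itself, the filtering is elided.
 */
#if 0 /* usage sketch only */
static void vmxSketchEnumVmcsFields(void)
{
    for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFields); i++)
    {
        uint32_t const uVmcsField = g_aVmcsFields[i];
        /* ... decide whether uVmcsField is eligible for the shadow VMCS ... */
        RT_NOREF(uVmcsField);
    }
}
#endif
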
578#ifdef VBOX_STRICT
579static const uint32_t g_aVmcsSegBase[] =
580{
581 VMX_VMCS_GUEST_ES_BASE,
582 VMX_VMCS_GUEST_CS_BASE,
583 VMX_VMCS_GUEST_SS_BASE,
584 VMX_VMCS_GUEST_DS_BASE,
585 VMX_VMCS_GUEST_FS_BASE,
586 VMX_VMCS_GUEST_GS_BASE
587};
588static const uint32_t g_aVmcsSegSel[] =
589{
590 VMX_VMCS16_GUEST_ES_SEL,
591 VMX_VMCS16_GUEST_CS_SEL,
592 VMX_VMCS16_GUEST_SS_SEL,
593 VMX_VMCS16_GUEST_DS_SEL,
594 VMX_VMCS16_GUEST_FS_SEL,
595 VMX_VMCS16_GUEST_GS_SEL
596};
597static const uint32_t g_aVmcsSegLimit[] =
598{
599 VMX_VMCS32_GUEST_ES_LIMIT,
600 VMX_VMCS32_GUEST_CS_LIMIT,
601 VMX_VMCS32_GUEST_SS_LIMIT,
602 VMX_VMCS32_GUEST_DS_LIMIT,
603 VMX_VMCS32_GUEST_FS_LIMIT,
604 VMX_VMCS32_GUEST_GS_LIMIT
605};
606static const uint32_t g_aVmcsSegAttr[] =
607{
608 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
609 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
610 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
611 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
612 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
613 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
614};
615AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
616AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
617AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
618AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
619#endif /* VBOX_STRICT */
620
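/*
 * Editor's note: illustrative sketch, not part of the original sources.  The four
 * strict-build tables above are all indexed by the X86_SREG_XXX ordinal, so the
 * selector/base/limit/attribute VMCS fields of a segment register can be looked up
 * uniformly.  The function name is hypothetical.
 */
#if 0 /* usage sketch only */
static void vmxSketchSegVmcsFields(uint8_t iSegReg)
{
    Assert(iSegReg < X86_SREG_COUNT);
    uint32_t const idSel   = g_aVmcsSegSel[iSegReg];
    uint32_t const idBase  = g_aVmcsSegBase[iSegReg];
    uint32_t const idLimit = g_aVmcsSegLimit[iSegReg];
    uint32_t const idAttr  = g_aVmcsSegAttr[iSegReg];
    RT_NOREF(idSel, idBase, idLimit, idAttr);
}
#endif
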
621#ifdef HMVMX_USE_FUNCTION_TABLE
622/**
623 * VMX_EXIT dispatch table.
624 */
625static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
626{
627 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
628 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
629 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
630 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
631 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
632 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
633 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
634 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
635 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
636 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
637 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
638 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
639 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
640 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
641 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
642 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
643 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
644 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
645 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
647 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
648 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
649 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
650 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
651 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
652 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
653 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
654 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
655 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
656#else
657 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
658 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
659 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
660 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
661 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
662 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
663 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
664 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
665 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
666#endif
667 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
668 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
669 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
670 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
671 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
672 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
673 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
674 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
675 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
676 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
677 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
678 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
679 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
680 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
681 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
682 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
683 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
684 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
685 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
686 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
687 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
688 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
689#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
690 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitInvept },
691#else
692 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
693#endif
694 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
695 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
696#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
697 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
698#else
699 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
700#endif
701 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
702 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
703 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
704 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
705 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
706 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
707 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
708 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
709 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
710 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
711 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
712 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
713 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
714 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
715 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
716 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
717};
718#endif /* HMVMX_USE_FUNCTION_TABLE */
719
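/*
 * Editor's note: illustrative sketch, not part of the original sources.  It shows
 * how the VM-exit reason is typically used to index g_aVMExitHandlers when
 * HMVMX_USE_FUNCTION_TABLE is in effect; the table has VMX_EXIT_MAX + 1 entries so
 * the exit reason must be range-checked first.  The dispatcher name is hypothetical.
 */
#if 0 /* usage sketch only */
DECLINLINE(VBOXSTRICTRC) vmxSketchDispatchExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
{
    Assert(uExitReason <= VMX_EXIT_MAX);
    return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
}
#endif
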
720#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
721static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
722{
723 /* 0 */ "(Not Used)",
724 /* 1 */ "VMCALL executed in VMX root operation.",
725 /* 2 */ "VMCLEAR with invalid physical address.",
726 /* 3 */ "VMCLEAR with VMXON pointer.",
727 /* 4 */ "VMLAUNCH with non-clear VMCS.",
728 /* 5 */ "VMRESUME with non-launched VMCS.",
729 /* 6 */ "VMRESUME after VMXOFF",
730 /* 7 */ "VM-entry with invalid control fields.",
731 /* 8 */ "VM-entry with invalid host state fields.",
732 /* 9 */ "VMPTRLD with invalid physical address.",
733 /* 10 */ "VMPTRLD with VMXON pointer.",
734 /* 11 */ "VMPTRLD with incorrect revision identifier.",
735 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
736 /* 13 */ "VMWRITE to read-only VMCS component.",
737 /* 14 */ "(Not Used)",
738 /* 15 */ "VMXON executed in VMX root operation.",
739 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
740 /* 17 */ "VM-entry with non-launched executing VMCS.",
741 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
742 /* 19 */ "VMCALL with non-clear VMCS.",
743 /* 20 */ "VMCALL with invalid VM-exit control fields.",
744 /* 21 */ "(Not Used)",
745 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
746 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
747 /* 24 */ "VMCALL with invalid SMM-monitor features.",
748 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
749 /* 26 */ "VM-entry with events blocked by MOV SS.",
750 /* 27 */ "(Not Used)",
751 /* 28 */ "Invalid operand to INVEPT/INVVPID."
752};
753#endif /* VBOX_STRICT && LOG_ENABLED */
754
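/*
 * Editor's note: illustrative sketch, not part of the original sources.  In strict,
 * logging-enabled builds the table above maps the VM-instruction error number read
 * from VMX_VMCS32_RO_VM_INSTR_ERROR to a human-readable string; anything beyond
 * HMVMX_INSTR_ERROR_MAX must be treated as unknown.  The helper name is hypothetical.
 */
#if 0 /* usage sketch only */
DECLINLINE(const char *) vmxSketchInstrErrorToString(uint32_t uInstrError)
{
    if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
        return g_apszVmxInstrErrors[uInstrError];
    return "Unknown/reserved VM-instruction error";
}
#endif
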
755
756/**
757 * Gets the CR0 guest/host mask.
758 *
759 * These bits typically do not change through the lifetime of a VM. Any bit set in
760 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
761 * by the guest.
762 *
763 * @returns The CR0 guest/host mask.
764 * @param pVCpu The cross context virtual CPU structure.
765 */
766static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
767{
768 /*
769 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
770 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
771 *
772 * Furthermore, modifications to any bits that are reserved/unspecified currently
773 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
774 * when future CPUs specify and use currently reserved/unspecified bits.
775 */
776 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
777 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
778 * and @bugref{6944}. */
779 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
780 return ( X86_CR0_PE
781 | X86_CR0_NE
782 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
783 | X86_CR0_PG
784 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
785}
786
787
788/**
789 * Gets the CR4 guest/host mask.
790 *
791 * These bits typically do not change through the lifetime of a VM. Any bit set in
792 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
793 * by the guest.
794 *
795 * @returns The CR4 guest/host mask.
796 * @param pVCpu The cross context virtual CPU structure.
797 */
798static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
799{
800 /*
801 * We construct a mask of all CR4 bits that the guest can modify without causing
802 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
803 * a VM-exit when the guest attempts to modify them when executing using
804 * hardware-assisted VMX.
805 *
806 * When a feature is not exposed to the guest (and may be present on the host),
807 * we want to intercept guest modifications to the bit so we can emulate proper
808 * behavior (e.g., #GP).
809 *
810 * Furthermore, only modifications to those bits that don't require immediate
811 * emulation are allowed. For example, PCIDE is excluded because the behavior
812 * depends on CR3 which might not always be the guest value while executing
813 * using hardware-assisted VMX.
814 */
815 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
816 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
817 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
818 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
819
820 /*
821 * Paranoia.
822 * Ensure features exposed to the guest are present on the host.
823 */
824 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
825 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
826 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
827
828 uint64_t const fGstMask = ( X86_CR4_PVI
829 | X86_CR4_TSD
830 | X86_CR4_DE
831 | X86_CR4_MCE
832 | X86_CR4_PCE
833 | X86_CR4_OSXMMEEXCPT
834 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
835 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
836 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
837 return ~fGstMask;
838}
839
840
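/*
 * Editor's note: illustrative sketch, not part of the original sources.  It shows
 * the masks returned by vmxHCGetFixedCr0Mask/vmxHCGetFixedCr4Mask being committed
 * to the CR0/CR4 guest/host mask fields.  It assumes a natural-width write wrapper
 * VMX_VMCS_WRITE_NW, the counterpart of the VMX_VMCS_READ_NW wrapper used elsewhere
 * in this file; the function name is hypothetical.
 */
#if 0 /* usage sketch only */
static void vmxSketchCommitCrMasks(PVMCPUCC pVCpu)
{
    uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    uint64_t const fCr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
    int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
    AssertRC(rc);
    rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);
    AssertRC(rc);
}
#endif
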
841/**
842 * Adds one or more exceptions to the exception bitmap and commits it to the current
843 * VMCS.
844 *
845 * @param pVCpu The cross context virtual CPU structure.
846 * @param pVmxTransient The VMX-transient structure.
847 * @param uXcptMask The exception(s) to add.
848 */
849static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
850{
851 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
852 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
853 if ((uXcptBitmap & uXcptMask) != uXcptMask)
854 {
855 uXcptBitmap |= uXcptMask;
856 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
857 AssertRC(rc);
858 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
859 }
860}
861
862
863/**
864 * Adds an exception to the exception bitmap and commits it to the current VMCS.
865 *
866 * @param pVCpu The cross context virtual CPU structure.
867 * @param pVmxTransient The VMX-transient structure.
868 * @param uXcpt The exception to add.
869 */
870static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
871{
872 Assert(uXcpt <= X86_XCPT_LAST);
873 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
874}
875
876
877/**
878 * Removes one or more exceptions from the exception bitmap and commits it to the
879 * current VMCS.
880 *
881 * This takes care of not removing the exception intercept if a nested-guest
882 * requires the exception to be intercepted.
883 *
884 * @returns VBox status code.
885 * @param pVCpu The cross context virtual CPU structure.
886 * @param pVmxTransient The VMX-transient structure.
887 * @param uXcptMask The exception(s) to remove.
888 */
889static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
890{
891 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
892 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
893 if (u32XcptBitmap & uXcptMask)
894 {
895#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
896 if (!pVmxTransient->fIsNestedGuest)
897 { /* likely */ }
898 else
899 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
900#endif
901#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
902 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
903 | RT_BIT(X86_XCPT_DE)
904 | RT_BIT(X86_XCPT_NM)
905 | RT_BIT(X86_XCPT_TS)
906 | RT_BIT(X86_XCPT_UD)
907 | RT_BIT(X86_XCPT_NP)
908 | RT_BIT(X86_XCPT_SS)
909 | RT_BIT(X86_XCPT_GP)
910 | RT_BIT(X86_XCPT_PF)
911 | RT_BIT(X86_XCPT_MF));
912#elif defined(HMVMX_ALWAYS_TRAP_PF)
913 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
914#endif
915 if (uXcptMask)
916 {
917 /* Validate we are not removing any essential exception intercepts. */
918#ifndef IN_NEM_DARWIN
919 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
920#else
921 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
922#endif
923 NOREF(pVCpu);
924 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
925 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
926
927 /* Remove it from the exception bitmap. */
928 u32XcptBitmap &= ~uXcptMask;
929
930 /* Commit and update the cache if necessary. */
931 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
932 {
933 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
934 AssertRC(rc);
935 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
936 }
937 }
938 }
939 return VINF_SUCCESS;
940}
941
942
943/**
944 * Removes an exception from the exception bitmap and commits it to the current
945 * VMCS.
946 *
947 * @returns VBox status code.
948 * @param pVCpu The cross context virtual CPU structure.
949 * @param pVmxTransient The VMX-transient structure.
950 * @param uXcpt The exception to remove.
951 */
952static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
953{
954 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
955}
956
957
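/*
 * Editor's note: illustrative sketch, not part of the original sources.  It shows
 * the add/remove helpers above being used to toggle a single #GP intercept; the
 * remove path may legitimately keep the intercept if the nested-guest still needs
 * it.  The function name is hypothetical.
 */
#if 0 /* usage sketch only */
static void vmxSketchToggleGpIntercept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, bool fIntercept)
{
    if (fIntercept)
        vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
    else
    {
        int const rc = vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
        AssertRC(rc);
    }
}
#endif
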
958#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
959/**
960 * Loads the shadow VMCS specified by the VMCS info. object.
961 *
962 * @returns VBox status code.
963 * @param pVmcsInfo The VMCS info. object.
964 *
965 * @remarks Can be called with interrupts disabled.
966 */
967static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
968{
969 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
970 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
971
972 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
973 if (RT_SUCCESS(rc))
974 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
975 return rc;
976}
977
978
979/**
980 * Clears the shadow VMCS specified by the VMCS info. object.
981 *
982 * @returns VBox status code.
983 * @param pVmcsInfo The VMCS info. object.
984 *
985 * @remarks Can be called with interrupts disabled.
986 */
987static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
988{
989 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
990 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
991
992 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
993 if (RT_SUCCESS(rc))
994 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
995 return rc;
996}
997
998
999/**
1000 * Switches from and to the specified VMCSes.
1001 *
1002 * @returns VBox status code.
1003 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1004 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1005 *
1006 * @remarks Called with interrupts disabled.
1007 */
1008static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1009{
1010 /*
1011 * Clear the VMCS we are switching out if it has not already been cleared.
1012 * This will sync any CPU internal data back to the VMCS.
1013 */
1014 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1015 {
1016 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1017 if (RT_SUCCESS(rc))
1018 {
1019 /*
1020 * The shadow VMCS, if any, would not be active at this point since we
1021 * would have cleared it while importing the virtual hardware-virtualization
1022 * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1023 * clear the shadow VMCS here, just assert for safety.
1024 */
1025 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1026 }
1027 else
1028 return rc;
1029 }
1030
1031 /*
1032 * Clear the VMCS we are switching to if it has not already been cleared.
1033 * This will initialize the VMCS launch state to "clear" required for loading it.
1034 *
1035 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1036 */
1037 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1038 {
1039 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1040 if (RT_SUCCESS(rc))
1041 { /* likely */ }
1042 else
1043 return rc;
1044 }
1045
1046 /*
1047 * Finally, load the VMCS we are switching to.
1048 */
1049 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1050}
1051
1052
1053/**
1054 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1055 * caller.
1056 *
1057 * @returns VBox status code.
1058 * @param pVCpu The cross context virtual CPU structure.
1059 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1060 * true) or guest VMCS (pass false).
1061 */
1062static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1063{
1064 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1065 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1066
1067 PVMXVMCSINFO pVmcsInfoFrom;
1068 PVMXVMCSINFO pVmcsInfoTo;
1069 if (fSwitchToNstGstVmcs)
1070 {
1071 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1072 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1073 }
1074 else
1075 {
1076 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1077 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1078 }
1079
1080 /*
1081 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1082 * preemption hook code path acquires the current VMCS.
1083 */
1084 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1085
1086 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1087 if (RT_SUCCESS(rc))
1088 {
1089 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1090 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1091
1092 /*
1093 * If we are switching to a VMCS that was executed on a different host CPU or was
1094 * never executed before, flag that we need to export the host state before executing
1095 * guest/nested-guest code using hardware-assisted VMX.
1096 *
1097 * This could probably be done in a preemptible context since the preemption hook
1098 * will flag the necessary change in host context. However, since preemption is
1099 * already disabled and to avoid making assumptions about host specific code in
1100 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1101 * disabled.
1102 */
1103 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1104 { /* likely */ }
1105 else
1106 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1107
1108 ASMSetFlags(fEFlags);
1109
1110 /*
1111 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1112 * flag that we need to update the host MSR values there. Even if we decide in the
1113 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1114 * if its content differs, we would have to update the host MSRs anyway.
1115 */
1116 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1117 }
1118 else
1119 ASMSetFlags(fEFlags);
1120 return rc;
1121}
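
/*
 * Editor's note: illustrative sketch, not part of the original sources.  It shows
 * the intended call pattern for vmxHCSwitchToGstOrNstGstVmcs: switch to the
 * nested-guest VMCS before running the nested-guest and switch back afterwards.
 * Error handling is reduced to the bare minimum and the function name is
 * hypothetical.
 */
#if 0 /* usage sketch only */
static int vmxSketchRunNestedGuest(PVMCPUCC pVCpu)
{
    int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
    AssertRCReturn(rc, rc);
    /* ... execute the nested-guest using hardware-assisted VMX ... */
    return vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
}
#endif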
1122#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1123
1124
1125#ifdef VBOX_STRICT
1126/**
1127 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1128 * transient structure.
1129 *
1130 * @param pVCpu The cross context virtual CPU structure.
1131 * @param pVmxTransient The VMX-transient structure.
1132 */
1133DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1134{
1135 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1136 AssertRC(rc);
1137}
1138
1139
1140/**
1141 * Reads the VM-entry exception error code field from the VMCS into
1142 * the VMX transient structure.
1143 *
1144 * @param pVCpu The cross context virtual CPU structure.
1145 * @param pVmxTransient The VMX-transient structure.
1146 */
1147DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1148{
1149 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1150 AssertRC(rc);
1151}
1152
1153
1154/**
1155 * Reads the VM-entry instruction length field from the VMCS into
1156 * the VMX transient structure.
1157 *
1158 * @param pVCpu The cross context virtual CPU structure.
1159 * @param pVmxTransient The VMX-transient structure.
1160 */
1161DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1162{
1163 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1164 AssertRC(rc);
1165}
1166#endif /* VBOX_STRICT */
1167
1168
1169/**
1170 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1171 * transient structure.
1172 *
1173 * @param pVCpu The cross context virtual CPU structure.
1174 * @param pVmxTransient The VMX-transient structure.
1175 */
1176DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1177{
1178 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1179 {
1180 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1181 AssertRC(rc);
1182 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1183 }
1184}
1185
1186
1187/**
1188 * Reads the VM-exit interruption error code from the VMCS into the VMX
1189 * transient structure.
1190 *
1191 * @param pVCpu The cross context virtual CPU structure.
1192 * @param pVmxTransient The VMX-transient structure.
1193 */
1194DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1195{
1196 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1197 {
1198 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1199 AssertRC(rc);
1200 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1201 }
1202}
1203
1204
1205/**
1206 * Reads the VM-exit instruction length field from the VMCS into the VMX
1207 * transient structure.
1208 *
1209 * @param pVCpu The cross context virtual CPU structure.
1210 * @param pVmxTransient The VMX-transient structure.
1211 */
1212DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1213{
1214 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1215 {
1216 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1217 AssertRC(rc);
1218 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1219 }
1220}
1221
1222
1223/**
1224 * Reads the VM-exit instruction-information field from the VMCS into
1225 * the VMX transient structure.
1226 *
1227 * @param pVCpu The cross context virtual CPU structure.
1228 * @param pVmxTransient The VMX-transient structure.
1229 */
1230DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1231{
1232 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1233 {
1234 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1235 AssertRC(rc);
1236 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1237 }
1238}
1239
1240
1241/**
1242 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1243 *
1244 * @param pVCpu The cross context virtual CPU structure.
1245 * @param pVmxTransient The VMX-transient structure.
1246 */
1247DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1248{
1249 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1250 {
1251 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1252 AssertRC(rc);
1253 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1254 }
1255}
1256
1257
1258/**
1259 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1260 *
1261 * @param pVCpu The cross context virtual CPU structure.
1262 * @param pVmxTransient The VMX-transient structure.
1263 */
1264DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1265{
1266 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1267 {
1268 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1269 AssertRC(rc);
1270 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1271 }
1272}
1273
1274
1275/**
1276 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1277 *
1278 * @param pVCpu The cross context virtual CPU structure.
1279 * @param pVmxTransient The VMX-transient structure.
1280 */
1281DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1282{
1283 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1284 {
1285 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1286 AssertRC(rc);
1287 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1288 }
1289}
1290
1291#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1292/**
1293 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1294 * structure.
1295 *
1296 * @param pVCpu The cross context virtual CPU structure.
1297 * @param pVmxTransient The VMX-transient structure.
1298 */
1299DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1300{
1301 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1302 {
1303 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1304 AssertRC(rc);
1305 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1306 }
1307}
1308#endif
1309
1310/**
1311 * Reads the IDT-vectoring information field from the VMCS into the VMX
1312 * transient structure.
1313 *
1314 * @param pVCpu The cross context virtual CPU structure.
1315 * @param pVmxTransient The VMX-transient structure.
1316 *
1317 * @remarks No-long-jump zone!!!
1318 */
1319DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1320{
1321 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1322 {
1323 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1324 AssertRC(rc);
1325 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1326 }
1327}
1328
1329
1330/**
1331 * Reads the IDT-vectoring error code from the VMCS into the VMX
1332 * transient structure.
1333 *
1334 * @param pVCpu The cross context virtual CPU structure.
1335 * @param pVmxTransient The VMX-transient structure.
1336 */
1337DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1338{
1339 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1340 {
1341 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1342 AssertRC(rc);
1343 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1344 }
1345}
1346
1347#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1348/**
1349 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1350 *
1351 * @param pVCpu The cross context virtual CPU structure.
1352 * @param pVmxTransient The VMX-transient structure.
1353 */
1354static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1355{
1356 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1357 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1358 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1359 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1360 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1361 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1362 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1363 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1364 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1365 AssertRC(rc);
1366 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1367 | HMVMX_READ_EXIT_INSTR_LEN
1368 | HMVMX_READ_EXIT_INSTR_INFO
1369 | HMVMX_READ_IDT_VECTORING_INFO
1370 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1371 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1372 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1373 | HMVMX_READ_GUEST_LINEAR_ADDR
1374 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1375}
1376#endif
1377
1378/**
1379 * Verifies that our cached values of the VMCS fields are all consistent with
1380 * what's actually present in the VMCS.
1381 *
1382 * @returns VBox status code.
1383 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1384 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1385 * VMCS content. HMCPU error-field is
1386 * updated, see VMX_VCI_XXX.
1387 * @param pVCpu The cross context virtual CPU structure.
1388 * @param pVmcsInfo The VMCS info. object.
1389 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1390 */
1391static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1392{
1393 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1394
1395 uint32_t u32Val;
1396 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1397 AssertRC(rc);
1398 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1399 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1400 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1401 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1402
1403 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1404 AssertRC(rc);
1405 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1406 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1407 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1408 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1409
1410 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1411 AssertRC(rc);
1412 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1413 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1414 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1415 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1416
1417 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1418 AssertRC(rc);
1419 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1420 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1421 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1422 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1423
1424 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1425 {
1426 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1427 AssertRC(rc);
1428 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1429 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1430 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1431 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1432 }
1433
1434 uint64_t u64Val;
1435 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1436 {
1437 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1438 AssertRC(rc);
1439 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1440 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1441 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1442 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1443 }
1444
1445 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1446 AssertRC(rc);
1447 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1448 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1449 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1450 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1451
1452 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1453 AssertRC(rc);
1454 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1455 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1456 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1457 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1458
1459 NOREF(pcszVmcs);
1460 return VINF_SUCCESS;
1461}
1462
1463
1464/**
1465 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
1466 * VMCS.
1467 *
1468 * This is typically required when the guest changes paging mode.
1469 *
1470 * @returns VBox status code.
1471 * @param pVCpu The cross context virtual CPU structure.
1472 * @param pVmxTransient The VMX-transient structure.
1473 *
1474 * @remarks Requires EFER.
1475 * @remarks No-long-jump zone!!!
1476 */
1477static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1478{
1479 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
1480 {
1481 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1482 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1483
1484 /*
1485 * VM-entry controls.
1486 */
1487 {
1488 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1489 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1490
1491 /*
1492 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
1493 * The first VT-x capable CPUs only supported the 1-setting of this bit.
1494 *
1495 * For nested-guests, this is a mandatory VM-entry control. It's also
1496 * required because we do not want to leak host bits to the nested-guest.
1497 */
1498 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
1499
1500 /*
1501 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
1502 *
1503             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
1504             * required to get the nested-guest working with hardware-assisted VMX execution.
1505 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
1506 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
1507 * here rather than while merging the guest VMCS controls.
1508 */
1509 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
1510 {
1511 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
1512 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
1513 }
1514 else
1515 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
1516
1517 /*
1518 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
1519 *
1520 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
1521 * regardless of whether the nested-guest VMCS specifies it because we are free to
1522 * load whatever MSRs we require and we do not need to modify the guest visible copy
1523 * of the VM-entry MSR load area.
1524 */
1525 if ( g_fHmVmxSupportsVmcsEfer
1526#ifndef IN_NEM_DARWIN
1527 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
1528#endif
1529 )
1530 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
1531 else
1532 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
1533
1534 /*
1535 * The following should -not- be set (since we're not in SMM mode):
1536 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
1537 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
1538 */
1539
1540 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
1541 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
1542
1543 if ((fVal & fZap) == fVal)
1544 { /* likely */ }
1545 else
1546 {
1547 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1548 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
1549 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
1550 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1551 }
1552
1553 /* Commit it to the VMCS. */
1554 if (pVmcsInfo->u32EntryCtls != fVal)
1555 {
1556 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
1557 AssertRC(rc);
1558 pVmcsInfo->u32EntryCtls = fVal;
1559 }
1560 }
1561
1562 /*
1563 * VM-exit controls.
1564 */
1565 {
1566 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1567 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1568
1569 /*
1570 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
1571 * supported the 1-setting of this bit.
1572 *
1573 * For nested-guests, we set the "save debug controls" as the converse
1574 * "load debug controls" is mandatory for nested-guests anyway.
1575 */
1576 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
1577
1578 /*
1579 * Set the host long mode active (EFER.LMA) bit (which Intel calls
1580 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
1581 * host EFER.LMA and EFER.LME bit to this value. See assertion in
1582 * vmxHCExportHostMsrs().
1583 *
1584 * For nested-guests, we always set this bit as we do not support 32-bit
1585 * hosts.
1586 */
1587 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
1588
1589#ifndef IN_NEM_DARWIN
1590 /*
1591 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
1592 *
1593 * For nested-guests, we should use the "save IA32_EFER" control if we also
1594 * used the "load IA32_EFER" control while exporting VM-entry controls.
1595 */
1596 if ( g_fHmVmxSupportsVmcsEfer
1597 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
1598 {
1599 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
1600 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
1601 }
1602#endif
1603
1604 /*
1605 * Enable saving of the VMX-preemption timer value on VM-exit.
1606 * For nested-guests, currently not exposed/used.
1607 */
1608 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
1609 * the timer value. */
1610 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
1611 {
1612 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1613 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
1614 }
1615
1616 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
1617 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
1618
1619 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
1620 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
1621 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
1622
1623 if ((fVal & fZap) == fVal)
1624 { /* likely */ }
1625 else
1626 {
1627 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1628 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
1629 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
1630 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1631 }
1632
1633 /* Commit it to the VMCS. */
1634 if (pVmcsInfo->u32ExitCtls != fVal)
1635 {
1636 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
1637 AssertRC(rc);
1638 pVmcsInfo->u32ExitCtls = fVal;
1639 }
1640 }
1641
1642 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
1643 }
1644 return VINF_SUCCESS;
1645}
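
/*
 * Illustrative sketch (not part of the original file): the allowed0/allowed1 capability
 * check used above for both the VM-entry and VM-exit controls, shown in isolation. The
 * function name and parameters are hypothetical; only standard fixed-width types are used.
 */
static bool exampleIsVmxCtlsValueSupported(uint32_t fVal, uint32_t fAllowed0, uint32_t fAllowed1)
{
    /* Every bit the CPU mandates (allowed0) must be set... */
    if ((fVal & fAllowed0) != fAllowed0)
        return false;
    /* ...and masking with allowed1 must not change the value, i.e. no disallowed bits are set. */
    return (fVal & fAllowed1) == fVal;
}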
1646
1647
1648/**
1649 * Sets the TPR threshold in the VMCS.
1650 *
1651 * @param pVCpu The cross context virtual CPU structure.
1652 * @param pVmcsInfo The VMCS info. object.
1653 * @param u32TprThreshold The TPR threshold (task-priority class only).
1654 */
1655DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
1656{
1657 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
1658 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
1659 RT_NOREF(pVmcsInfo);
1660 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
1661 AssertRC(rc);
1662}
1663
1664
1665/**
1666 * Exports the guest APIC TPR state into the VMCS.
1667 *
1668 * @param pVCpu The cross context virtual CPU structure.
1669 * @param pVmxTransient The VMX-transient structure.
1670 *
1671 * @remarks No-long-jump zone!!!
1672 */
1673static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1674{
1675 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
1676 {
1677 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
1678
1679 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1680 if (!pVmxTransient->fIsNestedGuest)
1681 {
1682 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1683 && APICIsEnabled(pVCpu))
1684 {
1685 /*
1686 * Setup TPR shadowing.
1687 */
1688 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
1689 {
1690 bool fPendingIntr = false;
1691 uint8_t u8Tpr = 0;
1692 uint8_t u8PendingIntr = 0;
1693 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
1694 AssertRC(rc);
1695
1696 /*
1697 * If there are interrupts pending but masked by the TPR, instruct VT-x to
1698 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
1699 * priority of the pending interrupt so we can deliver the interrupt. If there
1700 * are no interrupts pending, set threshold to 0 to not cause any
1701 * TPR-below-threshold VM-exits.
1702 */
1703 uint32_t u32TprThreshold = 0;
1704 if (fPendingIntr)
1705 {
1706 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
1707 (which is the Task-Priority Class). */
1708 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
1709 const uint8_t u8TprPriority = u8Tpr >> 4;
1710 if (u8PendingPriority <= u8TprPriority)
1711 u32TprThreshold = u8PendingPriority;
1712 }
1713
1714 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
1715 }
1716 }
1717 }
1718 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
1719 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1720 }
1721}
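
/*
 * Illustrative sketch (not part of the original file): the TPR-threshold computation from
 * vmxHCExportGuestApicTpr() above, factored out on its own. The name and parameters are
 * hypothetical; only bits 7:4 (the task-priority class) of the TPR and of the highest
 * pending interrupt vector matter here.
 */
static uint32_t exampleComputeTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    uint32_t u32TprThreshold = 0;
    if (fPendingIntr)
    {
        uint8_t const u8PendingPriority = u8PendingIntr >> 4;
        uint8_t const u8TprPriority     = u8Tpr >> 4;
        if (u8PendingPriority <= u8TprPriority)
            u32TprThreshold = u8PendingPriority;   /* Request a VM-exit once TPR drops below this. */
    }
    return u32TprThreshold;                        /* 0 => no TPR-below-threshold VM-exits. */
}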
1722
1723
1724/**
1725 * Gets the guest interruptibility-state and updates related force-flags.
1726 *
1727 * @returns Guest's interruptibility-state.
1728 * @param pVCpu The cross context virtual CPU structure.
1729 *
1730 * @remarks No-long-jump zone!!!
1731 */
1732static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
1733{
1734 /*
1735 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
1736 */
1737 uint32_t fIntrState = 0;
1738 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1739 {
1740 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
1741 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
1742
1743 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1744 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
1745 {
1746 if (pCtx->eflags.Bits.u1IF)
1747 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1748 else
1749 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
1750 }
1751 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1752 {
1753 /*
1754 * We can clear the inhibit force flag as even if we go back to the recompiler
1755 * without executing guest code in VT-x, the flag's condition to be cleared is
1756 * met and thus the cleared state is correct.
1757 */
1758 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1759 }
1760 }
1761
1762 /*
1763 * Check if we should inhibit NMI delivery.
1764 */
1765 if (CPUMIsGuestNmiBlocking(pVCpu))
1766 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1767
1768 /*
1769 * Validate.
1770 */
1771#ifdef VBOX_STRICT
1772    /* We don't support block-by-SMI yet. */
1773 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
1774
1775 /* Block-by-STI must not be set when interrupts are disabled. */
1776 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
1777 {
1778 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1779 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
1780 }
1781#endif
1782
1783 return fIntrState;
1784}
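
/*
 * Illustrative sketch (not part of the original file): how the STI vs. MOV SS blocking
 * encoding is chosen when an interrupt-inhibit window is active at the current RIP, as in
 * vmxHCGetGuestIntrStateAndUpdateFFs() above. The name, parameters and the two example
 * constants are hypothetical stand-ins for the real VMX_VMCS_GUEST_INT_STATE_XXX values.
 */
#define EXAMPLE_INT_STATE_BLOCK_STI     UINT32_C(0x1)
#define EXAMPLE_INT_STATE_BLOCK_MOVSS   UINT32_C(0x2)

static uint32_t exampleIntrStateForInhibitWindow(bool fInhibitActiveAtRip, bool fIfSet)
{
    if (!fInhibitActiveAtRip)
        return 0;
    /* Interrupts enabled means the inhibit came from STI; otherwise from a MOV SS / POP SS. */
    return fIfSet ? EXAMPLE_INT_STATE_BLOCK_STI : EXAMPLE_INT_STATE_BLOCK_MOVSS;
}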
1785
1786
1787/**
1788 * Exports the exception intercepts required for guest execution in the VMCS.
1789 *
1790 * @param pVCpu The cross context virtual CPU structure.
1791 * @param pVmxTransient The VMX-transient structure.
1792 *
1793 * @remarks No-long-jump zone!!!
1794 */
1795static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1796{
1797 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
1798 {
1799 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
1800 if ( !pVmxTransient->fIsNestedGuest
1801 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
1802 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1803 else
1804 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
1805
1806 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
1807 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
1808 }
1809}
1810
1811
1812/**
1813 * Exports the guest's RIP into the guest-state area in the VMCS.
1814 *
1815 * @param pVCpu The cross context virtual CPU structure.
1816 *
1817 * @remarks No-long-jump zone!!!
1818 */
1819static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
1820{
1821 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
1822 {
1823 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
1824
1825 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
1826 AssertRC(rc);
1827
1828 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
1829 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
1830 }
1831}
1832
1833
1834/**
1835 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
1836 *
1837 * @param pVCpu The cross context virtual CPU structure.
1838 * @param pVmxTransient The VMX-transient structure.
1839 *
1840 * @remarks No-long-jump zone!!!
1841 */
1842static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
1843{
1844 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
1845 {
1846 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1847
1848 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
1849 Let us assert it as such and use 32-bit VMWRITE. */
1850 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
1851 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
1852 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
1853 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
1854
1855#ifndef IN_NEM_DARWIN
1856 /*
1857 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
1858 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
1859 * can run the real-mode guest code under Virtual 8086 mode.
1860 */
1861 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
1862 if (pVmcsInfo->RealMode.fRealOnV86Active)
1863 {
1864 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
1865 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
1866 Assert(!pVmxTransient->fIsNestedGuest);
1867 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
1868 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
1869 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
1870 }
1871#else
1872 RT_NOREF(pVmxTransient);
1873#endif
1874
1875 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
1876 AssertRC(rc);
1877
1878 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
1879 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
1880 }
1881}
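
/*
 * Illustrative sketch (not part of the original file): the eflags massaging performed by the
 * real-on-v86 hack in vmxHCExportGuestRflags() above. The name and the two masks are
 * hypothetical stand-ins for X86_EFL_VM (bit 17) and X86_EFL_IOPL (bits 13:12).
 */
#define EXAMPLE_EFL_VM      UINT32_C(0x00020000)
#define EXAMPLE_EFL_IOPL    UINT32_C(0x00003000)

static uint32_t exampleEflagsForRealOnV86(uint32_t fEFlags)
{
    fEFlags |= EXAMPLE_EFL_VM;     /* Run the real-mode code under virtual-8086 mode. */
    fEFlags &= ~EXAMPLE_EFL_IOPL;  /* IOPL 0 so the relevant instructions fault as expected. */
    return fEFlags;                /* The original eflags are saved separately for VM-exit. */
}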
1882
1883
1884#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1885/**
1886 * Copies the nested-guest VMCS to the shadow VMCS.
1887 *
1888 * @returns VBox status code.
1889 * @param pVCpu The cross context virtual CPU structure.
1890 * @param pVmcsInfo The VMCS info. object.
1891 *
1892 * @remarks No-long-jump zone!!!
1893 */
1894static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1895{
1896 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1897 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1898
1899 /*
1900 * Disable interrupts so we don't get preempted while the shadow VMCS is the
1901 * current VMCS, as we may try saving guest lazy MSRs.
1902 *
1903 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
1904 * calling the import VMCS code which is currently performing the guest MSR reads
1905 * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
1906 * and the rest of the VMX leave session machinery.
1907 */
1908 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1909
1910 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1911 if (RT_SUCCESS(rc))
1912 {
1913 /*
1914 * Copy all guest read/write VMCS fields.
1915 *
1916 * We don't check for VMWRITE failures here for performance reasons and
1917 * because they are not expected to fail, barring irrecoverable conditions
1918 * like hardware errors.
1919 */
1920 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1921 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1922 {
1923 uint64_t u64Val;
1924 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1925 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1926 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1927 }
1928
1929 /*
1930 * If the host CPU supports writing all VMCS fields, copy the guest read-only
1931 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
1932 */
1933 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
1934 {
1935 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
1936 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
1937 {
1938 uint64_t u64Val;
1939 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
1940 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
1941 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
1942 }
1943 }
1944
1945 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1946 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1947 }
1948
1949 ASMSetFlags(fEFlags);
1950 return rc;
1951}
1952
1953
1954/**
1955 * Copies the shadow VMCS to the nested-guest VMCS.
1956 *
1957 * @returns VBox status code.
1958 * @param pVCpu The cross context virtual CPU structure.
1959 * @param pVmcsInfo The VMCS info. object.
1960 *
1961 * @remarks Called with interrupts disabled.
1962 */
1963static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1964{
1965 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1966 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
1967 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1968
1969 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
1970 if (RT_SUCCESS(rc))
1971 {
1972 /*
1973 * Copy guest read/write fields from the shadow VMCS.
1974 * Guest read-only fields cannot be modified, so no need to copy them.
1975 *
1976 * We don't check for VMREAD failures here for performance reasons and
1977 * because they are not expected to fail, barring irrecoverable conditions
1978 * like hardware errors.
1979 */
1980 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
1981 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
1982 {
1983 uint64_t u64Val;
1984 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
1985 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
1986 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
1987 }
1988
1989 rc = vmxHCClearShadowVmcs(pVmcsInfo);
1990 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
1991 }
1992 return rc;
1993}
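
/*
 * Illustrative sketch (not part of the original file): the table-driven field-copy loop used
 * by the two functions above, with the VMCS accessors replaced by hypothetical callbacks so
 * the pattern stands on its own. As noted above, failures are deliberately not checked.
 */
typedef uint64_t FNEXAMPLEREADFIELD(void *pvSrc, uint32_t uVmcsField);
typedef void     FNEXAMPLEWRITEFIELD(void *pvDst, uint32_t uVmcsField, uint64_t u64Val);

static void exampleCopyVmcsFields(void *pvSrc, void *pvDst, uint32_t const *paFields, uint32_t cFields,
                                  FNEXAMPLEREADFIELD *pfnRead, FNEXAMPLEWRITEFIELD *pfnWrite)
{
    for (uint32_t i = 0; i < cFields; i++)
    {
        uint64_t const u64Val = pfnRead(pvSrc, paFields[i]);
        pfnWrite(pvDst, paFields[i], u64Val);
    }
}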
1994
1995
1996/**
1997 * Enables VMCS shadowing for the given VMCS info. object.
1998 *
1999 * @param pVCpu The cross context virtual CPU structure.
2000 * @param pVmcsInfo The VMCS info. object.
2001 *
2002 * @remarks No-long-jump zone!!!
2003 */
2004static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2005{
2006 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2007 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
2008 {
2009 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
2010 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
2011 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2012 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
2013 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2014 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
2015 Log4Func(("Enabled\n"));
2016 }
2017}
2018
2019
2020/**
2021 * Disables VMCS shadowing for the given VMCS info. object.
2022 *
2023 * @param pVCpu The cross context virtual CPU structure.
2024 * @param pVmcsInfo The VMCS info. object.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2029{
2030 /*
2031 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
2032 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
2033 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
2034 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
2035 *
2036 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
2037 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2038 */
2039 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
2040 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
2041 {
2042 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
2043 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
2044 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
2045 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
2046 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
2047 Log4Func(("Disabled\n"));
2048 }
2049}
2050#endif
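
/*
 * Illustrative sketch (not part of the original file): the invariant maintained by
 * vmxHCEnableVmcsShadowing() and vmxHCDisableVmcsShadowing() above - the VMCS link pointer
 * equals the shadow VMCS address exactly when the VMCS-shadowing control is set, and is
 * NIL otherwise. The name and the NIL stand-in constant are hypothetical.
 */
#define EXAMPLE_NIL_HCPHYS  UINT64_C(0xffffffffffffffff)

static bool exampleIsVmcsShadowingStateConsistent(bool fShadowingEnabled, uint64_t u64VmcsLinkPtr,
                                                  uint64_t HCPhysShadowVmcs)
{
    if (fShadowingEnabled)
        return u64VmcsLinkPtr == HCPhysShadowVmcs;
    return u64VmcsLinkPtr == EXAMPLE_NIL_HCPHYS;
}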
2051
2052
2053/**
2054 * Exports the guest CR0 control register into the guest-state area in the VMCS.
2055 *
2056 * The guest FPU state is always pre-loaded hence we don't need to bother about
2057 * sharing FPU related CR0 bits between the guest and host.
2058 *
2059 * @returns VBox status code.
2060 * @param pVCpu The cross context virtual CPU structure.
2061 * @param pVmxTransient The VMX-transient structure.
2062 *
2063 * @remarks No-long-jump zone!!!
2064 */
2065static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2066{
2067 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
2068 {
2069 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2070 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2071
2072 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
2073 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
2074 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2075 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
2076 else
2077 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2078
2079 if (!pVmxTransient->fIsNestedGuest)
2080 {
2081 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2082 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2083 uint64_t const u64ShadowCr0 = u64GuestCr0;
2084 Assert(!RT_HI_U32(u64GuestCr0));
2085
2086 /*
2087 * Setup VT-x's view of the guest CR0.
2088 */
2089 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
2090 if (VM_IS_VMX_NESTED_PAGING(pVM))
2091 {
2092#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2093 if (CPUMIsGuestPagingEnabled(pVCpu))
2094 {
2095 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
2096 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
2097 | VMX_PROC_CTLS_CR3_STORE_EXIT);
2098 }
2099 else
2100 {
2101 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
2102 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2103 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2104 }
2105
2106 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2107 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2108 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
2109#endif
2110 }
2111 else
2112 {
2113 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
2114 u64GuestCr0 |= X86_CR0_WP;
2115 }
2116
2117 /*
2118 * Guest FPU bits.
2119 *
2120 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
2121 * using CR0.TS.
2122 *
2123 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
2124             * set on the first CPUs to support VT-x; there is no mention of it with regards to UX in the VM-entry checks.
2125 */
2126 u64GuestCr0 |= X86_CR0_NE;
2127
2128 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
2129 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
2130
2131 /*
2132 * Update exception intercepts.
2133 */
2134 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
2135#ifndef IN_NEM_DARWIN
2136 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2137 {
2138 Assert(PDMVmmDevHeapIsEnabled(pVM));
2139 Assert(pVM->hm.s.vmx.pRealModeTSS);
2140 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2141 }
2142 else
2143#endif
2144 {
2145 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
2146 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2147 if (fInterceptMF)
2148 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
2149 }
2150
2151 /* Additional intercepts for debugging, define these yourself explicitly. */
2152#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2153 uXcptBitmap |= 0
2154 | RT_BIT(X86_XCPT_BP)
2155 | RT_BIT(X86_XCPT_DE)
2156 | RT_BIT(X86_XCPT_NM)
2157 | RT_BIT(X86_XCPT_TS)
2158 | RT_BIT(X86_XCPT_UD)
2159 | RT_BIT(X86_XCPT_NP)
2160 | RT_BIT(X86_XCPT_SS)
2161 | RT_BIT(X86_XCPT_GP)
2162 | RT_BIT(X86_XCPT_PF)
2163 | RT_BIT(X86_XCPT_MF)
2164 ;
2165#elif defined(HMVMX_ALWAYS_TRAP_PF)
2166 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2167#endif
2168 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
2169 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
2170 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
2171
2172 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2173 u64GuestCr0 |= fSetCr0;
2174 u64GuestCr0 &= fZapCr0;
2175 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2176
2177 /* Commit the CR0 and related fields to the guest VMCS. */
2178 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2179 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2180 if (uProcCtls != pVmcsInfo->u32ProcCtls)
2181 {
2182 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
2183 AssertRC(rc);
2184 }
2185 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
2186 {
2187 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2188 AssertRC(rc);
2189 }
2190
2191 /* Update our caches. */
2192 pVmcsInfo->u32ProcCtls = uProcCtls;
2193 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2194
2195 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2196 }
2197 else
2198 {
2199 /*
2200 * With nested-guests, we may have extended the guest/host mask here since we
2201 * merged in the outer guest's mask. Thus, the merged mask can include more bits
2202 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
2203 * originally supplied. We must copy those bits from the nested-guest CR0 into
2204 * the nested-guest CR0 read-shadow.
2205 */
2206 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2207 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
2208 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
2209 Assert(!RT_HI_U32(u64GuestCr0));
2210 Assert(u64GuestCr0 & X86_CR0_NE);
2211
2212 /* Apply the hardware specified CR0 fixed bits and enable caching. */
2213 u64GuestCr0 |= fSetCr0;
2214 u64GuestCr0 &= fZapCr0;
2215 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
2216
2217 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
2218 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
2219 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
2220
2221 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
2222 }
2223
2224 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
2225 }
2226
2227 return VINF_SUCCESS;
2228}
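
/*
 * Illustrative sketch (not part of the original file): applying the CR0 fixed-0/fixed-1
 * requirements and forcing caching on, as done near the end of vmxHCExportGuestCR0() above.
 * The name and the two masks are hypothetical stand-ins for X86_CR0_CD (bit 30) and
 * X86_CR0_NW (bit 29).
 */
#define EXAMPLE_CR0_CD  UINT64_C(0x40000000)
#define EXAMPLE_CR0_NW  UINT64_C(0x20000000)

static uint64_t exampleApplyCr0FixedBits(uint64_t u64GuestCr0, uint64_t fSetCr0, uint64_t fZapCr0)
{
    u64GuestCr0 |= fSetCr0;                              /* Bits the CPU requires to be 1. */
    u64GuestCr0 &= fZapCr0;                              /* Clear bits the CPU requires to be 0. */
    u64GuestCr0 &= ~(EXAMPLE_CR0_CD | EXAMPLE_CR0_NW);   /* Keep caching enabled. */
    return u64GuestCr0;
}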
2229
2230
2231/**
2232 * Exports the guest control registers (CR3, CR4) into the guest-state area
2233 * in the VMCS.
2234 *
2235 * @returns VBox strict status code.
2236 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
2237 * without unrestricted guest access and the VMMDev is not presently
2238 * mapped (e.g. EFI32).
2239 *
2240 * @param pVCpu The cross context virtual CPU structure.
2241 * @param pVmxTransient The VMX-transient structure.
2242 *
2243 * @remarks No-long-jump zone!!!
2244 */
2245static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2246{
2247 int rc = VINF_SUCCESS;
2248 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2249
2250 /*
2251 * Guest CR2.
2252 * It's always loaded in the assembler code. Nothing to do here.
2253 */
2254
2255 /*
2256 * Guest CR3.
2257 */
2258 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
2259 {
2260 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2261
2262 if (VM_IS_VMX_NESTED_PAGING(pVM))
2263 {
2264#ifndef IN_NEM_DARWIN
2265 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2266 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2267
2268 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2269 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
2270 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2271 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
2272
2273 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
2274 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
2275 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
2276
2277 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2278            AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
2279                       && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
2280 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
2281 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
2282 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
2283 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
2284
2285 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
2286 AssertRC(rc);
2287#endif
2288
2289 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2290 uint64_t u64GuestCr3 = pCtx->cr3;
2291 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2292 || CPUMIsGuestPagingEnabledEx(pCtx))
2293 {
2294 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2295 if (CPUMIsGuestInPAEModeEx(pCtx))
2296 {
2297 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
2298 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
2299 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
2300 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
2301 }
2302
2303 /*
2304                 * With nested paging, the guest's view of its CR3 is left unblemished when the
2305                 * guest is using paging, or when we have unrestricted guest execution to handle
2306                 * the guest while it is not using paging.
2307 */
2308 }
2309#ifndef IN_NEM_DARWIN
2310 else
2311 {
2312 /*
2313 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
2314 * thinks it accesses physical memory directly, we use our identity-mapped
2315 * page table to map guest-linear to guest-physical addresses. EPT takes care
2316 * of translating it to host-physical addresses.
2317 */
2318 RTGCPHYS GCPhys;
2319 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2320
2321 /* We obtain it here every time as the guest could have relocated this PCI region. */
2322 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2323 if (RT_SUCCESS(rc))
2324 { /* likely */ }
2325 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
2326 {
2327 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
2328 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
2329 }
2330 else
2331 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
2332
2333 u64GuestCr3 = GCPhys;
2334 }
2335#endif
2336
2337 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
2338 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
2339 AssertRC(rc);
2340 }
2341 else
2342 {
2343 Assert(!pVmxTransient->fIsNestedGuest);
2344 /* Non-nested paging case, just use the hypervisor's CR3. */
2345 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
2346
2347 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
2348 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
2349 AssertRC(rc);
2350 }
2351
2352 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
2353 }
2354
2355 /*
2356 * Guest CR4.
2357 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
2358 */
2359 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
2360 {
2361 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2362 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2363
2364 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
2365 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
2366
2367 /*
2368 * With nested-guests, we may have extended the guest/host mask here (since we
2369 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
2370 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
2371 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
2372 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
2373 */
2374 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2375 uint64_t u64GuestCr4 = pCtx->cr4;
2376 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
2377 ? pCtx->cr4
2378 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
2379 Assert(!RT_HI_U32(u64GuestCr4));
2380
2381#ifndef IN_NEM_DARWIN
2382 /*
2383 * Setup VT-x's view of the guest CR4.
2384 *
2385 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
2386 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
2387 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2388 *
2389 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2390 */
2391 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2392 {
2393 Assert(pVM->hm.s.vmx.pRealModeTSS);
2394 Assert(PDMVmmDevHeapIsEnabled(pVM));
2395 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
2396 }
2397#endif
2398
2399 if (VM_IS_VMX_NESTED_PAGING(pVM))
2400 {
2401 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2402 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2403 {
2404 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2405 u64GuestCr4 |= X86_CR4_PSE;
2406 /* Our identity mapping is a 32-bit page directory. */
2407 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2408 }
2409 /* else use guest CR4.*/
2410 }
2411 else
2412 {
2413 Assert(!pVmxTransient->fIsNestedGuest);
2414
2415 /*
2416 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
2417 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
2418 */
2419 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
2420 {
2421 case PGMMODE_REAL: /* Real-mode. */
2422 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2423 case PGMMODE_32_BIT: /* 32-bit paging. */
2424 {
2425 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
2426 break;
2427 }
2428
2429 case PGMMODE_PAE: /* PAE paging. */
2430 case PGMMODE_PAE_NX: /* PAE paging with NX. */
2431 {
2432 u64GuestCr4 |= X86_CR4_PAE;
2433 break;
2434 }
2435
2436 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
2437 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
2438 {
2439#ifdef VBOX_WITH_64_BITS_GUESTS
2440 /* For our assumption in vmxHCShouldSwapEferMsr. */
2441 Assert(u64GuestCr4 & X86_CR4_PAE);
2442 break;
2443#endif
2444 }
2445 default:
2446 AssertFailed();
2447 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2448 }
2449 }
2450
2451 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
2452 u64GuestCr4 |= fSetCr4;
2453 u64GuestCr4 &= fZapCr4;
2454
2455 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
2456 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
2457 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
2458
2459#ifndef IN_NEM_DARWIN
2460 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
2461 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
2462 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
2463 {
2464 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
2465 hmR0VmxUpdateStartVmFunction(pVCpu);
2466 }
2467#endif
2468
2469 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
2470
2471 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
2472 }
2473 return rc;
2474}
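
/*
 * Illustrative sketch (not part of the original file): assembling an EPTP value the way
 * vmxHCExportGuestCR3AndCR4() does above - write-back memory type in bits 2:0 and a 4-level
 * page-walk length (encoded as length - 1) in bits 5:3, on top of the 4K-aligned table
 * address. The function name is hypothetical; accessed/dirty tracking is left disabled.
 */
static uint64_t exampleMakeEptp(uint64_t HCPhysEptTable)
{
    uint64_t const uMemTypeWb         = UINT64_C(6);  /* Write-back. */
    uint64_t const uPageWalkLenMinus1 = UINT64_C(3);  /* 4-level page walk. */
    return (HCPhysEptTable & ~UINT64_C(0xfff))
         | (uPageWalkLenMinus1 << 3)
         | uMemTypeWb;
}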
2475
2476
2477#ifdef VBOX_STRICT
2478/**
2479 * Strict function to validate segment registers.
2480 *
2481 * @param pVCpu The cross context virtual CPU structure.
2482 * @param pVmcsInfo The VMCS info. object.
2483 *
2484 * @remarks Will import guest CR0 on strict builds during validation of
2485 * segments.
2486 */
2487static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2488{
2489 /*
2490 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2491 *
2492 * The reason we check for attribute value 0 in this function and not just the unusable bit is
2493     * that vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
2494 * unusable bit and doesn't change the guest-context value.
2495 */
2496 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2497 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2498 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
2499 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
2500 && ( !CPUMIsGuestInRealModeEx(pCtx)
2501 && !CPUMIsGuestInV86ModeEx(pCtx)))
2502 {
2503 /* Protected mode checks */
2504 /* CS */
2505 Assert(pCtx->cs.Attr.n.u1Present);
2506 Assert(!(pCtx->cs.Attr.u & 0xf00));
2507 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
2508 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
2509 || !(pCtx->cs.Attr.n.u1Granularity));
2510 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
2511 || (pCtx->cs.Attr.n.u1Granularity));
2512 /* CS cannot be loaded with NULL in protected mode. */
2513 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
2514 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
2515 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
2516 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
2517 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
2518 else
2519            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
2520 /* SS */
2521 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2522 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
2523 if ( !(pCtx->cr0 & X86_CR0_PE)
2524 || pCtx->cs.Attr.n.u4Type == 3)
2525 {
2526 Assert(!pCtx->ss.Attr.n.u2Dpl);
2527 }
2528 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
2529 {
2530 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
2531 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
2532 Assert(pCtx->ss.Attr.n.u1Present);
2533 Assert(!(pCtx->ss.Attr.u & 0xf00));
2534 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
2535 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
2536 || !(pCtx->ss.Attr.n.u1Granularity));
2537 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
2538 || (pCtx->ss.Attr.n.u1Granularity));
2539 }
2540 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
2541 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
2542 {
2543 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2544 Assert(pCtx->ds.Attr.n.u1Present);
2545 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
2546 Assert(!(pCtx->ds.Attr.u & 0xf00));
2547 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
2548 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
2549 || !(pCtx->ds.Attr.n.u1Granularity));
2550 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
2551 || (pCtx->ds.Attr.n.u1Granularity));
2552 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2553 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
2554 }
2555 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
2556 {
2557 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2558 Assert(pCtx->es.Attr.n.u1Present);
2559 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
2560 Assert(!(pCtx->es.Attr.u & 0xf00));
2561 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
2562 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
2563 || !(pCtx->es.Attr.n.u1Granularity));
2564 Assert( !(pCtx->es.u32Limit & 0xfff00000)
2565 || (pCtx->es.Attr.n.u1Granularity));
2566 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2567 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
2568 }
2569 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
2570 {
2571 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2572 Assert(pCtx->fs.Attr.n.u1Present);
2573 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
2574 Assert(!(pCtx->fs.Attr.u & 0xf00));
2575 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
2576 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
2577 || !(pCtx->fs.Attr.n.u1Granularity));
2578 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
2579 || (pCtx->fs.Attr.n.u1Granularity));
2580 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2581 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2582 }
2583 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
2584 {
2585 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
2586 Assert(pCtx->gs.Attr.n.u1Present);
2587 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
2588 Assert(!(pCtx->gs.Attr.u & 0xf00));
2589 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
2590 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
2591 || !(pCtx->gs.Attr.n.u1Granularity));
2592 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
2593 || (pCtx->gs.Attr.n.u1Granularity));
2594 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
2595 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
2596 }
2597 /* 64-bit capable CPUs. */
2598 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2599 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
2600 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
2601 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
2602 }
2603 else if ( CPUMIsGuestInV86ModeEx(pCtx)
2604 || ( CPUMIsGuestInRealModeEx(pCtx)
2605 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2606 {
2607 /* Real and v86 mode checks. */
2608        /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS; we want to check what we're actually feeding to VT-x. */
2609 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
2610#ifndef IN_NEM_DARWIN
2611 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2612 {
2613 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
2614 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
2615 }
2616 else
2617#endif
2618 {
2619 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
2620 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
2621 }
2622
2623 /* CS */
2624 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
2625 Assert(pCtx->cs.u32Limit == 0xffff);
2626 Assert(u32CSAttr == 0xf3);
2627 /* SS */
2628 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
2629 Assert(pCtx->ss.u32Limit == 0xffff);
2630 Assert(u32SSAttr == 0xf3);
2631 /* DS */
2632 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
2633 Assert(pCtx->ds.u32Limit == 0xffff);
2634 Assert(u32DSAttr == 0xf3);
2635 /* ES */
2636 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
2637 Assert(pCtx->es.u32Limit == 0xffff);
2638 Assert(u32ESAttr == 0xf3);
2639 /* FS */
2640 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
2641 Assert(pCtx->fs.u32Limit == 0xffff);
2642 Assert(u32FSAttr == 0xf3);
2643 /* GS */
2644 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
2645 Assert(pCtx->gs.u32Limit == 0xffff);
2646 Assert(u32GSAttr == 0xf3);
2647 /* 64-bit capable CPUs. */
2648 Assert(!RT_HI_U32(pCtx->cs.u64Base));
2649 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
2650 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
2651 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
2652 }
2653}
2654#endif /* VBOX_STRICT */
2655
2656
2657/**
2658 * Exports a guest segment register into the guest-state area in the VMCS.
2659 *
2660 * @returns VBox status code.
2661 * @param pVCpu The cross context virtual CPU structure.
2662 * @param pVmcsInfo The VMCS info. object.
2663 * @param iSegReg The segment register number (X86_SREG_XXX).
2664 * @param pSelReg Pointer to the segment selector.
2665 *
2666 * @remarks No-long-jump zone!!!
2667 */
2668static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
2669{
2670 Assert(iSegReg < X86_SREG_COUNT);
2671
2672 uint32_t u32Access = pSelReg->Attr.u;
2673#ifndef IN_NEM_DARWIN
2674 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
2675#endif
2676 {
2677 /*
2678         * The way to tell whether this is really a null selector or just a selector loaded
2679         * with 0 in real-mode is by looking at the segment attributes. A selector loaded
2680         * in real-mode with the value 0 is valid and usable in protected-mode and we
2681         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
2682         * that NULL selectors loaded in protected-mode have their attributes set to 0.
2683 */
2684 if (u32Access)
2685 { }
2686 else
2687 u32Access = X86DESCATTR_UNUSABLE;
2688 }
2689#ifndef IN_NEM_DARWIN
2690 else
2691 {
2692 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
2693 u32Access = 0xf3;
2694 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2695 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2696 RT_NOREF_PV(pVCpu);
2697 }
2698#else
2699 RT_NOREF(pVmcsInfo);
2700#endif
2701
2702 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
2703 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
2704              ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
2705
2706 /*
2707 * Commit it to the VMCS.
2708 */
2709 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
2710 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
2711 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
2712 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
2713 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
2714 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
2715 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
2716 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
2717 return VINF_SUCCESS;
2718}
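
/*
 * Illustrative sketch (not part of the original file): the access-rights value written to
 * the VMCS for a segment register outside the real-on-v86 special case, per
 * vmxHCExportGuestSegReg() above - a zero attribute value marks the selector as unusable.
 * The name and the constant are hypothetical stand-ins (X86DESCATTR_UNUSABLE is bit 16 of
 * the access rights).
 */
#define EXAMPLE_DESCATTR_UNUSABLE   UINT32_C(0x00010000)

static uint32_t exampleSegAccessRightsForVmcs(uint32_t u32Attr)
{
    return u32Attr ? u32Attr : EXAMPLE_DESCATTR_UNUSABLE;
}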
2719
2720
2721/**
2722 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
2723 * area in the VMCS.
2724 *
2725 * @returns VBox status code.
2726 * @param pVCpu The cross context virtual CPU structure.
2727 * @param pVmxTransient The VMX-transient structure.
2728 *
2729 * @remarks Will import guest CR0 on strict builds during validation of
2730 * segments.
2731 * @remarks No-long-jump zone!!!
2732 */
2733static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2734{
2735 int rc = VERR_INTERNAL_ERROR_5;
2736#ifndef IN_NEM_DARWIN
2737 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2738#endif
2739 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2740 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2741#ifndef IN_NEM_DARWIN
2742 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
2743#endif
2744
2745 /*
2746 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
2747 */
2748 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
2749 {
2750 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
2751 {
2752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
2753#ifndef IN_NEM_DARWIN
2754 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2755 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
2756#endif
2757 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
2758 AssertRC(rc);
2759 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
2760 }
2761
2762 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
2763 {
2764 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
2765#ifndef IN_NEM_DARWIN
2766 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2767 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
2768#endif
2769 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
2770 AssertRC(rc);
2771 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
2772 }
2773
2774 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
2775 {
2776 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
2777#ifndef IN_NEM_DARWIN
2778 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2779 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
2780#endif
2781 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
2782 AssertRC(rc);
2783 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
2784 }
2785
2786 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
2787 {
2788 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
2789#ifndef IN_NEM_DARWIN
2790 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2791 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
2792#endif
2793 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
2794 AssertRC(rc);
2795 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
2796 }
2797
2798 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
2799 {
2800 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
2801#ifndef IN_NEM_DARWIN
2802 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2803 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
2804#endif
2805 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
2806 AssertRC(rc);
2807 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
2808 }
2809
2810 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
2811 {
2812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
2813#ifndef IN_NEM_DARWIN
2814 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
2815 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
2816#endif
2817 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
2818 AssertRC(rc);
2819 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
2820 }
2821
2822#ifdef VBOX_STRICT
2823 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
2824#endif
2825 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
2826 pCtx->cs.Attr.u));
2827 }
2828
2829 /*
2830 * Guest TR.
2831 */
2832 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
2833 {
2834 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
2835
2836 /*
2837 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
2838 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
2839 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
2840 */
2841 uint16_t u16Sel;
2842 uint32_t u32Limit;
2843 uint64_t u64Base;
2844 uint32_t u32AccessRights;
2845#ifndef IN_NEM_DARWIN
2846 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
2847#endif
2848 {
2849 u16Sel = pCtx->tr.Sel;
2850 u32Limit = pCtx->tr.u32Limit;
2851 u64Base = pCtx->tr.u64Base;
2852 u32AccessRights = pCtx->tr.Attr.u;
2853 }
2854#ifndef IN_NEM_DARWIN
2855 else
2856 {
2857 Assert(!pVmxTransient->fIsNestedGuest);
2858 Assert(pVM->hm.s.vmx.pRealModeTSS);
2859 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
2860
2861 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
2862 RTGCPHYS GCPhys;
2863 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
2864 AssertRCReturn(rc, rc);
2865
2866 X86DESCATTR DescAttr;
2867 DescAttr.u = 0;
2868 DescAttr.n.u1Present = 1;
2869 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2870
2871 u16Sel = 0;
2872 u32Limit = HM_VTX_TSS_SIZE;
2873 u64Base = GCPhys;
2874 u32AccessRights = DescAttr.u;
2875 }
2876#endif
2877
2878 /* Validate. */
2879 Assert(!(u16Sel & RT_BIT(2)));
2880 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
2881 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
2882 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
2883 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
2884 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
2885 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
2886 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
2887 Assert( (u32Limit & 0xfff) == 0xfff
2888 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
2889 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
2890 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
2891
2892 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
2893 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
2894 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
2895 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
2896
2897 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
2898 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
2899 }
2900
2901 /*
2902 * Guest GDTR.
2903 */
2904 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
2905 {
2906 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
2907
2908 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
2909 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
2910
2911 /* Validate. */
2912 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2913
2914 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
2915 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
2916 }
2917
2918 /*
2919 * Guest LDTR.
2920 */
2921 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
2922 {
2923 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
2924
2925 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
2926 uint32_t u32Access;
2927 if ( !pVmxTransient->fIsNestedGuest
2928 && !pCtx->ldtr.Attr.u)
2929 u32Access = X86DESCATTR_UNUSABLE;
2930 else
2931 u32Access = pCtx->ldtr.Attr.u;
2932
2933 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
2934 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
2935 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
2936 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
2937
2938 /* Validate. */
2939 if (!(u32Access & X86DESCATTR_UNUSABLE))
2940 {
2941 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
2942 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
2943 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
2944 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
2945 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
2946 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
2947 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
2948 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
2949 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
2950 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
2951 }
2952
2953 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
2954 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
2955 }
2956
2957 /*
2958 * Guest IDTR.
2959 */
2960 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
2961 {
2962 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
2963
2964 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
2965 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
2966
2967 /* Validate. */
2968 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
2969
2970 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
2971 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
2972 }
2973
2974 return VINF_SUCCESS;
2975}
2976
2977
2978/**
2979 * Gets the IEM exception flags for the specified vector and IDT vectoring /
2980 * VM-exit interruption info type.
2981 *
2982 * @returns The IEM exception flags.
2983 * @param uVector The event vector.
2984 * @param uVmxEventType The VMX event type.
2985 *
2986 * @remarks This function currently only constructs flags required for
2987 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
2988 * and CR2 aspects of an exception are not included).
2989 */
2990static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
2991{
2992 uint32_t fIemXcptFlags;
2993 switch (uVmxEventType)
2994 {
2995 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
2996 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
2997 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
2998 break;
2999
3000 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
3001 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
3002 break;
3003
3004 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
3005 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
3006 break;
3007
3008 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
3009 {
3010 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3011 if (uVector == X86_XCPT_BP)
3012 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
3013 else if (uVector == X86_XCPT_OF)
3014 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
3015 else
3016 {
3017 fIemXcptFlags = 0;
3018 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
3019 }
3020 break;
3021 }
3022
3023 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
3024 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
3025 break;
3026
3027 default:
3028 fIemXcptFlags = 0;
3029 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
3030 break;
3031 }
3032 return fIemXcptFlags;
3033}
3034
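/*
 * A minimal usage sketch of the mapping above. It is illustrative only (kept out of the
 * build) and the function name vmxHCSketchIemXcptFlags is made up for the example: a
 * hardware #GP reported via IDT-vectoring yields the plain CPU-exception flag, while a
 * #BP raised by INT3 yields the software-exception and BP-instruction flags.
 */
#if 0
DECLINLINE(void) vmxHCSketchIemXcptFlags(void)
{
    /* Hardware exception in the IDT-vectoring info: plain CPU exception. */
    uint32_t fFlags = vmxHCGetIemXcptFlags(X86_XCPT_GP, VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
    Assert(fFlags == IEM_XCPT_FLAGS_T_CPU_XCPT);

    /* #BP raised by INT3: software exception with the BP-instruction flag. */
    fFlags = vmxHCGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
    Assert(fFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
}
#endif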
3035
3036/**
3037 * Sets an event as a pending event to be injected into the guest.
3038 *
3039 * @param pVCpu The cross context virtual CPU structure.
3040 * @param u32IntInfo The VM-entry interruption-information field.
3041 * @param cbInstr The VM-entry instruction length in bytes (for
3042 * software interrupts, exceptions and privileged
3043 * software exceptions).
3044 * @param u32ErrCode The VM-entry exception error code.
3045 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3046 * page-fault.
3047 */
3048DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
3049 RTGCUINTPTR GCPtrFaultAddress)
3050{
3051 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3052 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
3053 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
3054 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
3055 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
3056 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
3057}
3058
3059
3060/**
3061 * Sets an external interrupt as pending-for-injection into the VM.
3062 *
3063 * @param pVCpu The cross context virtual CPU structure.
3064 * @param u8Interrupt The external interrupt vector.
3065 */
3066DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
3067{
3068 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
3069 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3070 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3071 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3072 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3073}
3074
3075
3076/**
3077 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
3078 *
3079 * @param pVCpu The cross context virtual CPU structure.
3080 */
3081DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
3082{
3083 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
3084 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
3085 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3086 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3087 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3088}
3089
3090
3091/**
3092 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
3093 *
3094 * @param pVCpu The cross context virtual CPU structure.
3095 */
3096DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
3097{
3098 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
3099 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3100 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3101 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3102 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3103}
3104
3105
3106/**
3107 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3108 *
3109 * @param pVCpu The cross context virtual CPU structure.
3110 */
3111DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
3112{
3113 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
3114 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3115 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3116 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3117 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3118}
3119
3120
3121/**
3122 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3123 *
3124 * @param pVCpu The cross context virtual CPU structure.
3125 */
3126DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
3127{
3128 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
3129 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3130 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
3131 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3132 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
3133}
3134
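/*
 * The helpers above all follow the same pattern: build the 32-bit VM-entry
 * interruption-information with RT_BF_MAKE and hand it to vmxHCSetPendingEvent().
 * Below is a minimal sketch (kept out of the build, hypothetical helper name) of what a
 * page-fault (#PF) variant would look like; unlike the helpers above it also carries an
 * error code and the fault address, which ends up in CR2 at injection time.
 */
#if 0
DECLINLINE(void) vmxHCSketchSetPendingXcptPF(PVMCPUCC pVCpu, uint32_t u32ErrCode, RTGCUINTPTR GCPtrFaultAddress)
{
    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, GCPtrFaultAddress);
}
#endif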
3135
3136#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3137/**
3138 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
3139 *
3140 * @param pVCpu The cross context virtual CPU structure.
3141 * @param u32ErrCode The error code for the general-protection exception.
3142 */
3143DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3144{
3145 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
3146 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3147 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3148 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3149 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3150}
3151
3152
3153/**
3154 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
3155 *
3156 * @param pVCpu The cross context virtual CPU structure.
3157 * @param u32ErrCode The error code for the stack exception.
3158 */
3159DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
3160{
3161 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
3162 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3163 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
3164 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
3165 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
3166}
3167#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3168
3169
3170/**
3171 * Fixes up attributes for the specified segment register.
3172 *
3173 * @param pVCpu The cross context virtual CPU structure.
3174 * @param pSelReg The segment register that needs fixing.
3175 * @param pszRegName The register name (for logging and assertions).
3176 */
3177static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
3178{
3179 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
3180
3181 /*
3182 * If VT-x marks the segment as unusable, most other bits remain undefined:
3183 * - For CS the L, D and G bits have meaning.
3184 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
3185 * - For the remaining data segments no bits are defined.
3186 *
3187     * The present bit and the unusable bit have been observed to be set at the
3188     * same time (the selector was supposed to be invalid as we started executing
3189     * a V8086 interrupt in ring-0).
3190     *
3191     * What is important for the rest of the VBox code is that the P bit is
3192     * cleared. Some of the other VBox code recognizes the unusable bit, but
3193     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
3194 * safe side here, we'll strip off P and other bits we don't care about. If
3195 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
3196 *
3197 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
3198 */
3199#ifdef VBOX_STRICT
3200 uint32_t const uAttr = pSelReg->Attr.u;
3201#endif
3202
3203 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
3204 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
3205 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
3206
3207#ifdef VBOX_STRICT
3208# ifndef IN_NEM_DARWIN
3209 VMMRZCallRing3Disable(pVCpu);
3210# endif
3211 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
3212# ifdef DEBUG_bird
3213 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
3214 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
3215 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
3216# endif
3217# ifndef IN_NEM_DARWIN
3218 VMMRZCallRing3Enable(pVCpu);
3219# endif
3220 NOREF(uAttr);
3221#endif
3222 RT_NOREF2(pVCpu, pszRegName);
3223}
3224
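/*
 * A small sketch (kept out of the build, made-up function name) of the masking performed
 * above: an unusable segment saved by VT-x with the present bit still set loses P and the
 * other stripped bits, but keeps the L/D/G/DPL/type/DT bits the rest of VBox may look at.
 */
#if 0
DECLINLINE(void) vmxHCSketchUnusableAttrMask(void)
{
    uint32_t const fKept  = X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
                          | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
    uint32_t const uSaved = X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_DPL;
    uint32_t const uFixed = uSaved & fKept;
    Assert(!(uFixed & X86DESCATTR_P));                                   /* P is stripped. */
    Assert(uFixed == (X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_DPL)); /* The rest survives. */
}
#endif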
3225
3226/**
3227 * Imports a guest segment register from the current VMCS into the guest-CPU
3228 * context.
3229 *
3230 * @param pVCpu The cross context virtual CPU structure.
3231 * @param iSegReg The segment register number (X86_SREG_XXX).
3232 *
3233 * @remarks Called with interrupts and/or preemption disabled.
3234 */
3235static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
3236{
3237 Assert(iSegReg < X86_SREG_COUNT);
3238 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
3239 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
3240 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
3241 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
3242
3243 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
3244
3245 uint16_t u16Sel;
3246 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
3247 pSelReg->Sel = u16Sel;
3248 pSelReg->ValidSel = u16Sel;
3249
3250 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
3251 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
3252
3253 uint32_t u32Attr;
3254 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
3255 pSelReg->Attr.u = u32Attr;
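    /* Note: "ES\0CS\0SS\0DS\0FS\0GS" below is one string literal of 3-byte (two chars + NUL) entries,
       so adding iSegReg * 3 picks out the NUL-terminated register name used for logging. */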
3256 if (u32Attr & X86DESCATTR_UNUSABLE)
3257 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
3258
3259 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
3260}
3261
3262
3263/**
3264 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
3265 *
3266 * @param pVCpu The cross context virtual CPU structure.
3267 *
3268 * @remarks Called with interrupts and/or preemption disabled.
3269 */
3270static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
3271{
3272 uint16_t u16Sel;
3273 uint64_t u64Base;
3274 uint32_t u32Limit, u32Attr;
3275 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
3276 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
3277 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3278 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
3279
3280 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
3281 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
3282 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3283 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
3284 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
3285 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
3286 if (u32Attr & X86DESCATTR_UNUSABLE)
3287 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
3288}
3289
3290
3291/**
3292 * Imports the guest TR from the current VMCS into the guest-CPU context.
3293 *
3294 * @param pVCpu The cross context virtual CPU structure.
3295 *
3296 * @remarks Called with interrupts and/or preemption disabled.
3297 */
3298static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
3299{
3300 uint16_t u16Sel;
3301 uint64_t u64Base;
3302 uint32_t u32Limit, u32Attr;
3303 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
3304 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
3305 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
3306 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
3307
3308 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
3309 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
3310 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3311 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
3312 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
3313 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
3314 /* TR is the only selector that can never be unusable. */
3315 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
3316}
3317
3318
3319/**
3320 * Imports the guest RIP from the VMCS back into the guest-CPU context.
3321 *
3322 * @param pVCpu The cross context virtual CPU structure.
3323 *
3324 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3325 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3326 * instead!!!
3327 */
3328static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
3329{
3330 uint64_t u64Val;
3331 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3332 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
3333 {
3334 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
3335 AssertRC(rc);
3336
3337 pCtx->rip = u64Val;
3338 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
3339 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
3340 }
3341}
3342
3343
3344/**
3345 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
3346 *
3347 * @param pVCpu The cross context virtual CPU structure.
3348 * @param pVmcsInfo The VMCS info. object.
3349 *
3350 * @remarks Called with interrupts and/or preemption disabled, should not assert!
3351 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3352 * instead!!!
3353 */
3354static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3355{
3356 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3357 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
3358 {
3359 uint64_t u64Val;
3360 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
3361 AssertRC(rc);
3362
3363 pCtx->rflags.u64 = u64Val;
3364#ifndef IN_NEM_DARWIN
3365 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3366 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
3367 {
3368 pCtx->eflags.Bits.u1VM = 0;
3369 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
3370 }
3371#else
3372 RT_NOREF(pVmcsInfo);
3373#endif
3374 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
3375 }
3376}
3377
3378
3379/**
3380 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
3381 * context.
3382 *
3383 * @param pVCpu The cross context virtual CPU structure.
3384 * @param pVmcsInfo The VMCS info. object.
3385 *
3386 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
3387 * do not log!
3388 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
3389 * instead!!!
3390 */
3391static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3392{
3393 uint32_t u32Val;
3394 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
3395 if (!u32Val)
3396 {
3397 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3398 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3399 CPUMSetGuestNmiBlocking(pVCpu, false);
3400 }
3401 else
3402 {
3403 /*
3404 * We must import RIP here to set our EM interrupt-inhibited state.
3405 * We also import RFLAGS as our code that evaluates pending interrupts
3406 * before VM-entry requires it.
3407 */
3408 vmxHCImportGuestRip(pVCpu);
3409 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3410
3411 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3412 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
3413 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3414 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3415
3416 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
3417 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
3418 }
3419}
3420
3421
3422/**
3423 * Worker for VMXR0ImportStateOnDemand.
3424 *
3425 * @returns VBox status code.
3426 * @param pVCpu The cross context virtual CPU structure.
3427 * @param pVmcsInfo The VMCS info. object.
3428 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3429 */
3430static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
3431{
3432 int rc = VINF_SUCCESS;
3433 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3434 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3435 uint32_t u32Val;
3436
3437 /*
3438     * Note! This is a hack to work around a mysterious BSOD observed with release builds
3439 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
3440 * neither are other host platforms.
3441 *
3442 * Committing this temporarily as it prevents BSOD.
3443 *
3444 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
3445 */
3446# ifdef RT_OS_WINDOWS
3447 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
3448 return VERR_HM_IPE_1;
3449# endif
3450
3451 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3452
3453#ifndef IN_NEM_DARWIN
3454 /*
3455 * We disable interrupts to make the updating of the state and in particular
3456     * the fExtrn modification atomic wrt preemption hooks.
3457 */
3458 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3459#endif
3460
3461 fWhat &= pCtx->fExtrn;
3462 if (fWhat)
3463 {
3464 do
3465 {
3466 if (fWhat & CPUMCTX_EXTRN_RIP)
3467 vmxHCImportGuestRip(pVCpu);
3468
3469 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
3470 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
3471
3472 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
3473 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
3474
3475 if (fWhat & CPUMCTX_EXTRN_RSP)
3476 {
3477 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
3478 AssertRC(rc);
3479 }
3480
3481 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
3482 {
3483 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3484#ifndef IN_NEM_DARWIN
3485 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3486#else
3487 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
3488#endif
3489 if (fWhat & CPUMCTX_EXTRN_CS)
3490 {
3491 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
3492 vmxHCImportGuestRip(pVCpu);
3493 if (fRealOnV86Active)
3494 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
3495 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
3496 }
3497 if (fWhat & CPUMCTX_EXTRN_SS)
3498 {
3499 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
3500 if (fRealOnV86Active)
3501 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
3502 }
3503 if (fWhat & CPUMCTX_EXTRN_DS)
3504 {
3505 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
3506 if (fRealOnV86Active)
3507 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
3508 }
3509 if (fWhat & CPUMCTX_EXTRN_ES)
3510 {
3511 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
3512 if (fRealOnV86Active)
3513 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
3514 }
3515 if (fWhat & CPUMCTX_EXTRN_FS)
3516 {
3517 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
3518 if (fRealOnV86Active)
3519 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
3520 }
3521 if (fWhat & CPUMCTX_EXTRN_GS)
3522 {
3523 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
3524 if (fRealOnV86Active)
3525 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
3526 }
3527 }
3528
3529 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
3530 {
3531 if (fWhat & CPUMCTX_EXTRN_LDTR)
3532 vmxHCImportGuestLdtr(pVCpu);
3533
3534 if (fWhat & CPUMCTX_EXTRN_GDTR)
3535 {
3536 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
3537 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
3538 pCtx->gdtr.cbGdt = u32Val;
3539 }
3540
3541 /* Guest IDTR. */
3542 if (fWhat & CPUMCTX_EXTRN_IDTR)
3543 {
3544 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
3545 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
3546 pCtx->idtr.cbIdt = u32Val;
3547 }
3548
3549 /* Guest TR. */
3550 if (fWhat & CPUMCTX_EXTRN_TR)
3551 {
3552#ifndef IN_NEM_DARWIN
3553                 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
3554                    so we don't need to import it. */
3555 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3556#endif
3557 vmxHCImportGuestTr(pVCpu);
3558 }
3559 }
3560
3561 if (fWhat & CPUMCTX_EXTRN_DR7)
3562 {
3563#ifndef IN_NEM_DARWIN
3564 if (!pVCpu->hmr0.s.fUsingHyperDR7)
3565#endif
3566 {
3567 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
3568 AssertRC(rc);
3569 }
3570 }
3571
3572 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
3573 {
3574 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
3575 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
3576 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
3577 pCtx->SysEnter.cs = u32Val;
3578 }
3579
3580#ifndef IN_NEM_DARWIN
3581 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
3582 {
3583 if ( pVM->hmr0.s.fAllow64BitGuests
3584 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3585 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
3586 }
3587
3588 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
3589 {
3590 if ( pVM->hmr0.s.fAllow64BitGuests
3591 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
3592 {
3593 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3594 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
3595 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
3596 }
3597 }
3598
3599 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
3600 {
3601 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
3602 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
3603 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
3604 Assert(pMsrs);
3605 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
3606 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
3607 for (uint32_t i = 0; i < cMsrs; i++)
3608 {
3609 uint32_t const idMsr = pMsrs[i].u32Msr;
3610 switch (idMsr)
3611 {
3612 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
3613 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
3614 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
3615 default:
3616 {
3617 uint32_t idxLbrMsr;
3618 if (VM_IS_VMX_LBR(pVM))
3619 {
3620 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
3621 {
3622 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
3623 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3624 break;
3625 }
3626 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
3627 {
3628                             Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
3629 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
3630 break;
3631 }
3632 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
3633 {
3634 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
3635 break;
3636 }
3637 /* Fallthru (no break) */
3638 }
3639 pCtx->fExtrn = 0;
3640 VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
3641 ASMSetFlags(fEFlags);
3642 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
3643 return VERR_HM_UNEXPECTED_LD_ST_MSR;
3644 }
3645 }
3646 }
3647 }
3648#endif
3649
3650 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
3651 {
3652 if (fWhat & CPUMCTX_EXTRN_CR0)
3653 {
3654 uint64_t u64Cr0;
3655 uint64_t u64Shadow;
3656 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
3657 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
3658#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3659 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3660 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3661#else
3662 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3663 {
3664 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3665 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
3666 }
3667 else
3668 {
3669 /*
3670 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
3671 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3672 * re-construct CR0. See @bugref{9180#c95} for details.
3673 */
3674 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3675 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3676 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
3677 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
3678 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
3679 }
3680#endif
3681#ifndef IN_NEM_DARWIN
3682 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
3683#endif
3684 CPUMSetGuestCR0(pVCpu, u64Cr0);
3685#ifndef IN_NEM_DARWIN
3686 VMMRZCallRing3Enable(pVCpu);
3687#endif
3688 }
3689
3690 if (fWhat & CPUMCTX_EXTRN_CR4)
3691 {
3692 uint64_t u64Cr4;
3693 uint64_t u64Shadow;
3694 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
3695 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
3696#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
3697 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3698 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3699#else
3700 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
3701 {
3702 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3703 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
3704 }
3705 else
3706 {
3707 /*
3708 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
3709 * the nested-guest using hardware-assisted VMX. Accordingly we need to
3710 * re-construct CR4. See @bugref{9180#c95} for details.
3711 */
3712 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
3713 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3714 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
3715 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
3716 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
3717 }
3718#endif
3719 pCtx->cr4 = u64Cr4;
3720 }
3721
3722 if (fWhat & CPUMCTX_EXTRN_CR3)
3723 {
3724 /* CR0.PG bit changes are always intercepted, so it's up to date. */
3725 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3726 || ( VM_IS_VMX_NESTED_PAGING(pVM)
3727 && CPUMIsGuestPagingEnabledEx(pCtx)))
3728 {
3729 uint64_t u64Cr3;
3730 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
3731 if (pCtx->cr3 != u64Cr3)
3732 {
3733 pCtx->cr3 = u64Cr3;
3734 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3735 }
3736
3737 /*
3738 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
3739 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
3740 */
3741 if (CPUMIsGuestInPAEModeEx(pCtx))
3742 {
3743 X86PDPE aPaePdpes[4];
3744 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
3745 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
3746 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
3747 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
3748 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
3749 {
3750 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
3751 /* PGM now updates PAE PDPTEs while updating CR3. */
3752 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
3753 }
3754 }
3755 }
3756 }
3757 }
3758
3759#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3760 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
3761 {
3762 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3763 && !CPUMIsGuestInVmxNonRootMode(pCtx))
3764 {
3765 Assert(CPUMIsGuestInVmxRootMode(pCtx));
3766 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
3767 if (RT_SUCCESS(rc))
3768 { /* likely */ }
3769 else
3770 break;
3771 }
3772 }
3773#endif
3774 } while (0);
3775
3776 if (RT_SUCCESS(rc))
3777 {
3778 /* Update fExtrn. */
3779 pCtx->fExtrn &= ~fWhat;
3780
3781 /* If everything has been imported, clear the HM keeper bit. */
3782 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
3783 {
3784#ifndef IN_NEM_DARWIN
3785 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
3786#else
3787 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
3788#endif
3789 Assert(!pCtx->fExtrn);
3790 }
3791 }
3792 }
3793#ifndef IN_NEM_DARWIN
3794 else
3795 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
3796
3797 /*
3798 * Restore interrupts.
3799 */
3800 ASMSetFlags(fEFlags);
3801#endif
3802
3803 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
3804
3805 if (RT_SUCCESS(rc))
3806 { /* likely */ }
3807 else
3808 return rc;
3809
3810 /*
3811 * Honor any pending CR3 updates.
3812 *
3813 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
3814 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
3815 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
3816 *
3817     * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
3818 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
3819 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
3820 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
3821 *
3822 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
3823 *
3824 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
3825 */
3826 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
3827#ifndef IN_NEM_DARWIN
3828 && VMMRZCallRing3IsEnabled(pVCpu)
3829#endif
3830 )
3831 {
3832 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
3833 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
3834 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
3835 }
3836
3837 return VINF_SUCCESS;
3838}
3839
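/*
 * A hedged sketch (kept out of the build, hypothetical caller name) of how a VM-exit
 * handler typically uses the worker above to pull in just the state it needs: fWhat is
 * masked with fExtrn, so anything already imported is skipped, and the corresponding
 * fExtrn bits are clear on return.
 */
#if 0
static int vmxHCSketchImportForExitHandler(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    /* Only RIP and RFLAGS are requested; other state stays external until needed. */
    int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    AssertRCReturn(rc, rc);

    /* The requested fields are now valid in the guest-CPU context. */
    Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
    return VINF_SUCCESS;
}
#endif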
3840
3841/**
3842 * Check per-VM and per-VCPU force flag actions that require us to go back to
3843 * ring-3 for one reason or another.
3844 *
3845 * @returns Strict VBox status code (i.e. informational status codes too)
3846 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3847 * ring-3.
3848 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3849 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3850 * interrupts)
3851 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3852 * all EMTs to be in ring-3.
3853 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3854 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3855 * to the EM loop.
3856 *
3857 * @param pVCpu The cross context virtual CPU structure.
3858 * @param fIsNestedGuest Flag whether this is for a pending nested-guest event.
3859 * @param fStepping Whether we are single-stepping the guest using the
3860 * hypervisor debugger.
3861 *
3862 * @remarks This might cause nested-guest VM-exits; the caller must check whether the guest
3863 * is no longer in VMX non-root mode.
3864 */
3865static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
3866{
3867#ifndef IN_NEM_DARWIN
3868 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3869#endif
3870
3871 /*
3872 * Update pending interrupts into the APIC's IRR.
3873 */
3874 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3875 APICUpdatePendingInterrupts(pVCpu);
3876
3877 /*
3878 * Anything pending? Should be more likely than not if we're doing a good job.
3879 */
3880 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3881 if ( !fStepping
3882 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
3883 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
3884 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
3885 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3886 return VINF_SUCCESS;
3887
3888     /* Pending PGM CR3 sync. */
3889 if (VMCPU_FF_IS_ANY_SET(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
3890 {
3891 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3892 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
3893 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
3894 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
3895 if (rcStrict != VINF_SUCCESS)
3896 {
3897 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
3898 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
3899 return rcStrict;
3900 }
3901 }
3902
3903 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
3904 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
3905 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3906 {
3907 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
3908 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
3909 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
3910 return rc;
3911 }
3912
3913 /* Pending VM request packets, such as hardware interrupts. */
3914 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
3915 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
3916 {
3917 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
3918 Log4Func(("Pending VM request forcing us back to ring-3\n"));
3919 return VINF_EM_PENDING_REQUEST;
3920 }
3921
3922 /* Pending PGM pool flushes. */
3923 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3924 {
3925 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
3926 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
3927 return VINF_PGM_POOL_FLUSH_PENDING;
3928 }
3929
3930 /* Pending DMA requests. */
3931 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
3932 {
3933 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
3934 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
3935 return VINF_EM_RAW_TO_R3;
3936 }
3937
3938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3939 /*
3940 * Pending nested-guest events.
3941 *
3942     * Please note that the priority of these events is specified and important.
3943 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
3944 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
3945 */
3946 if (fIsNestedGuest)
3947 {
3948 /* Pending nested-guest APIC-write. */
3949 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3950 {
3951 Log4Func(("Pending nested-guest APIC-write\n"));
3952 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
3953 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3954 return rcStrict;
3955 }
3956
3957 /* Pending nested-guest monitor-trap flag (MTF). */
3958 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
3959 {
3960 Log4Func(("Pending nested-guest MTF\n"));
3961 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
3962 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3963 return rcStrict;
3964 }
3965
3966 /* Pending nested-guest VMX-preemption timer expired. */
3967 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
3968 {
3969 Log4Func(("Pending nested-guest preempt timer\n"));
3970 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
3971 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
3972 return rcStrict;
3973 }
3974 }
3975#else
3976 NOREF(fIsNestedGuest);
3977#endif
3978
3979 return VINF_SUCCESS;
3980}
3981
3982
3983/**
3984 * Converts any TRPM trap into a pending HM event. This is typically used when
3985 * entering from ring-3 (not longjmp returns).
3986 *
3987 * @param pVCpu The cross context virtual CPU structure.
3988 */
3989static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3990{
3991 Assert(TRPMHasTrap(pVCpu));
3992 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
3993
3994 uint8_t uVector;
3995 TRPMEVENT enmTrpmEvent;
3996 uint32_t uErrCode;
3997 RTGCUINTPTR GCPtrFaultAddress;
3998 uint8_t cbInstr;
3999 bool fIcebp;
4000
4001 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
4002 AssertRC(rc);
4003
4004 uint32_t u32IntInfo;
4005 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
4006 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
4007
4008 rc = TRPMResetTrap(pVCpu);
4009 AssertRC(rc);
4010 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
4011 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
4012
4013 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
4014}
4015
4016
4017/**
4018 * Converts the pending HM event into a TRPM trap.
4019 *
4020 * @param pVCpu The cross context virtual CPU structure.
4021 */
4022static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
4023{
4024 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4025
4026 /* If a trap was already pending, we did something wrong! */
4027 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
4028
4029 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
4030 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
4031 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
4032
4033 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
4034
4035 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
4036 AssertRC(rc);
4037
4038 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4039 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
4040
4041 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
4042 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
4043 else
4044 {
4045 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
4046 switch (uVectorType)
4047 {
4048 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4049 TRPMSetTrapDueToIcebp(pVCpu);
4050 RT_FALL_THRU();
4051 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4052 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4053 {
4054 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4055 || ( uVector == X86_XCPT_BP /* INT3 */
4056 || uVector == X86_XCPT_OF /* INTO */
4057 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
4058 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
4059 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
4060 break;
4061 }
4062 }
4063 }
4064
4065 /* We're now done converting the pending event. */
4066 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4067}
4068
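/*
 * A simplified sketch (kept out of the build, made-up function name) of how the two
 * converters above pair up around guest execution: TRPM traps queued in ring-3 become
 * pending HM events on the way in, and any event still pending is handed back to TRPM on
 * the way out.
 */
#if 0
static void vmxHCSketchTrpmRoundTrip(PVMCPUCC pVCpu)
{
    /* Entering from ring-3: convert a queued TRPM trap into a pending HM event. */
    if (TRPMHasTrap(pVCpu))
        vmxHCTrpmTrapToPendingEvent(pVCpu);

    /* ... execute the guest and handle VM-exits ... */

    /* Returning to ring-3: hand a still-pending HM event back to TRPM. */
    if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
        vmxHCPendingEventToTrpmTrap(pVCpu);
}
#endif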
4069
4070/**
4071 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
4072 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
4073 *
4074 * @param pVCpu The cross context virtual CPU structure.
4075 * @param pVmcsInfo The VMCS info. object.
4076 */
4077static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4078{
4079 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4080 {
4081 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4082 {
4083 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
4084 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4085 AssertRC(rc);
4086 }
4087     } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
4088}
4089
4090
4091/**
4092 * Clears the interrupt-window exiting control in the VMCS.
4093 *
4094 * @param pVCpu The cross context virtual CPU structure.
4095 * @param pVmcsInfo The VMCS info. object.
4096 */
4097DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4098{
4099 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
4100 {
4101 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
4102 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4103 AssertRC(rc);
4104 }
4105}
4106
4107
4108/**
4109 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
4110 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
4111 *
4112 * @param pVCpu The cross context virtual CPU structure.
4113 * @param pVmcsInfo The VMCS info. object.
4114 */
4115static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4116{
4117 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4118 {
4119 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4120 {
4121 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4122 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4123 AssertRC(rc);
4124 Log4Func(("Setup NMI-window exiting\n"));
4125 }
4126 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
4127}
4128
4129
4130/**
4131 * Clears the NMI-window exiting control in the VMCS.
4132 *
4133 * @param pVCpu The cross context virtual CPU structure.
4134 * @param pVmcsInfo The VMCS info. object.
4135 */
4136DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4137{
4138 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
4139 {
4140 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
4141 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
4142 AssertRC(rc);
4143 }
4144}
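/*
 * A simplified sketch (kept out of the build; function and parameter names are made up)
 * of the window-exiting protocol the four helpers above implement: arm the control while
 * a pending event cannot be delivered, and disarm it again once the interrupt window is
 * open (e.g. on VMX_EXIT_INT_WINDOW).
 */
#if 0
static void vmxHCSketchIntWindow(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIntWindowOpen)
{
    if (   VCPU_2_VMXSTATE(pVCpu).Event.fPending
        && !fIntWindowOpen)
        vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);    /* VM-exit as soon as the guest can take the event. */
    else
        vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);  /* No longer needed once the window is open. */
}
#endif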
4145
4146
4147/**
4148 * Injects an event into the guest upon VM-entry by updating the relevant fields
4149 * in the VM-entry area in the VMCS.
4150 *
4151 * @returns Strict VBox status code (i.e. informational status codes too).
4152 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
4153 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
4154 *
4155 * @param pVCpu The cross context virtual CPU structure.
4156 * @param pVmcsInfo        The VMCS info. object.
 * @param fIsNestedGuest   Flag whether the event is injected as part of nested-guest execution.
4157 * @param pEvent The event being injected.
4158 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
4159 *                      will be updated if necessary. This cannot be NULL.
4160 * @param fStepping Whether we're single-stepping guest execution and should
4161 * return VINF_EM_DBG_STEPPED if the event is injected
4162 * directly (registers modified by us, not by hardware on
4163 * VM-entry).
4164 */
4165static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
4166 uint32_t *pfIntrState)
4167{
4168 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
4169 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
4170 Assert(pfIntrState);
4171
4172#ifdef IN_NEM_DARWIN
4173 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
4174#endif
4175
4176 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4177 uint32_t u32IntInfo = pEvent->u64IntInfo;
4178 uint32_t const u32ErrCode = pEvent->u32ErrCode;
4179 uint32_t const cbInstr = pEvent->cbInstr;
4180 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
4181 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
4182 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
4183
4184#ifdef VBOX_STRICT
4185 /*
4186 * Validate the error-code-valid bit for hardware exceptions.
4187 * No error codes for exceptions in real-mode.
4188 *
4189 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4190 */
4191 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4192 && !CPUMIsGuestInRealModeEx(pCtx))
4193 {
4194 switch (uVector)
4195 {
4196 case X86_XCPT_PF:
4197 case X86_XCPT_DF:
4198 case X86_XCPT_TS:
4199 case X86_XCPT_NP:
4200 case X86_XCPT_SS:
4201 case X86_XCPT_GP:
4202 case X86_XCPT_AC:
4203 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
4204 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
4205 RT_FALL_THRU();
4206 default:
4207 break;
4208 }
4209 }
4210
4211 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
4212 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
4213 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4214#endif
4215
4216 RT_NOREF(uVector);
4217 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
4218 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
4219 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
4220 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
4221 {
4222 Assert(uVector <= X86_XCPT_LAST);
4223 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
4224 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
4225 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
4226 }
4227 else
4228 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
4229
4230 /*
4231 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
4232 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
4233 * interrupt handler in the (real-mode) guest.
4234 *
4235 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
4236 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
4237 */
4238 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
4239 {
4240#ifndef IN_NEM_DARWIN
4241 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
4242#endif
4243 {
4244 /*
4245 * For CPUs with unrestricted guest execution enabled and with the guest
4246 * in real-mode, we must not set the deliver-error-code bit.
4247 *
4248 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4249 */
4250 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
4251 }
4252#ifndef IN_NEM_DARWIN
4253 else
4254 {
4255 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4256 Assert(PDMVmmDevHeapIsEnabled(pVM));
4257 Assert(pVM->hm.s.vmx.pRealModeTSS);
4258 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
4259
4260 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
4261 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
4262 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
4263 AssertRCReturn(rc2, rc2);
4264
4265 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
4266 size_t const cbIdtEntry = sizeof(X86IDTR16);
4267 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
4268 {
4269 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
4270 if (uVector == X86_XCPT_DF)
4271 return VINF_EM_RESET;
4272
4273 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
4274 No error codes for exceptions in real-mode. */
4275 if (uVector == X86_XCPT_GP)
4276 {
4277 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4278 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4279 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4280 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4281 HMEVENT EventXcptDf;
4282 RT_ZERO(EventXcptDf);
4283 EventXcptDf.u64IntInfo = uXcptDfInfo;
4284 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
4285 }
4286
4287 /*
4288 * If we're injecting an event with no valid IDT entry, inject a #GP.
4289 * No error codes for exceptions in real-mode.
4290 *
4291 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
4292 */
4293 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4294 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
4295 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4296 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4297 HMEVENT EventXcptGp;
4298 RT_ZERO(EventXcptGp);
4299 EventXcptGp.u64IntInfo = uXcptGpInfo;
4300 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
4301 }
4302
4303 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
4304 uint16_t uGuestIp = pCtx->ip;
4305 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
4306 {
4307 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
4308 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
4309 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4310 }
4311 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
4312 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
4313
4314 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
4315 X86IDTR16 IdtEntry;
4316 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
4317 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
4318 AssertRCReturn(rc2, rc2);
4319
4320 /* Construct the stack frame for the interrupt/exception handler. */
4321 VBOXSTRICTRC rcStrict;
4322 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
4323 if (rcStrict == VINF_SUCCESS)
4324 {
4325 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
4326 if (rcStrict == VINF_SUCCESS)
4327 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
4328 }
4329
4330 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
4331 if (rcStrict == VINF_SUCCESS)
4332 {
4333 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
4334 pCtx->rip = IdtEntry.offSel;
4335 pCtx->cs.Sel = IdtEntry.uSel;
4336 pCtx->cs.ValidSel = IdtEntry.uSel;
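                /* Note: cbIdtEntry is sizeof(X86IDTR16), i.e. 4 bytes, so the shift below yields
                   the real-mode CS base (selector * 16). */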
4337 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
4338 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4339 && uVector == X86_XCPT_PF)
4340 pCtx->cr2 = GCPtrFault;
4341
4342 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
4343 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
4344 | HM_CHANGED_GUEST_RSP);
4345
4346 /*
4347 * If we delivered a hardware exception (other than an NMI) and if there was
4348 * block-by-STI in effect, we should clear it.
4349 */
4350 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
4351 {
4352 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
4353 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
4354 Log4Func(("Clearing inhibition due to STI\n"));
4355 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4356 }
4357
4358 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
4359 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
4360
4361 /*
4362 * The event has been truly dispatched to the guest. Mark it as no longer pending so
4363 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
4364 */
4365 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
4366
4367 /*
4368 * If we eventually support nested-guest execution without unrestricted guest execution,
4369 * we should set fInterceptEvents here.
4370 */
4371 Assert(!fIsNestedGuest);
4372
4373 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
4374 if (fStepping)
4375 rcStrict = VINF_EM_DBG_STEPPED;
4376 }
4377 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
4378 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4379 return rcStrict;
4380 }
4381#else
4382 RT_NOREF(pVmcsInfo);
4383#endif
4384 }
4385
4386 /*
4387 * Validate.
4388 */
4389 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
4390 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
4391
4392 /*
4393 * Inject the event into the VMCS.
4394 */
4395 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
4396 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
4397 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
4398 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
4399 AssertRC(rc);
4400
4401 /*
4402 * Update guest CR2 if this is a page-fault.
4403 */
4404 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
4405 pCtx->cr2 = GCPtrFault;
4406
4407 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
4408 return VINF_SUCCESS;
4409}
4410
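/*
 * A hedged sketch (kept out of the build, hypothetical caller name) of how an event is
 * packaged for the function above: zero an HMEVENT, build the interruption-information
 * with RT_BF_MAKE and inject. A #UD needs neither an error code nor a fault address.
 */
#if 0
static VBOXSTRICTRC vmxHCSketchInjectXcptUD(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t *pfIntrState)
{
    HMEVENT Event;
    RT_ZERO(Event);
    Event.u64IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, false /* fIsNestedGuest */, &Event, false /* fStepping */, pfIntrState);
}
#endif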
4411
4412/**
4413 * Evaluates the event to be delivered to the guest and sets it as the pending
4414 * event.
4415 *
4416 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
4417 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
4418 * NOT restore these force-flags.
4419 *
4420 * @returns Strict VBox status code (i.e. informational status codes too).
4421 * @param pVCpu The cross context virtual CPU structure.
4422 * @param pVmcsInfo The VMCS information structure.
4423 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
4424 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
4425 */
4426static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
4427{
4428 Assert(pfIntrState);
4429 Assert(!TRPMHasTrap(pVCpu));
4430
4431 /*
4432 * Compute/update guest-interruptibility state related FFs.
4433 * The FFs will be used below while evaluating events to be injected.
4434 */
4435 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
4436
4437 /*
4438 * Evaluate if a new event needs to be injected.
4439 * An event that's already pending has already performed all necessary checks.
4440 */
4441 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
4442 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4443 {
4444 /** @todo SMI. SMIs take priority over NMIs. */
4445
4446 /*
4447 * NMIs.
4448 * NMIs take priority over external interrupts.
4449 */
4450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4451 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4452#endif
4453 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4454 {
4455 /*
4456 * For a guest, the FF always indicates the guest's ability to receive an NMI.
4457 *
4458 * For a nested-guest, the FF always indicates the outer guest's ability to
4459 * receive an NMI while the guest-interruptibility state bit depends on whether
4460 * the nested-hypervisor is using virtual-NMIs.
4461 */
4462 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4463 {
4464#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4465 if ( fIsNestedGuest
4466 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
4467 return IEMExecVmxVmexitXcptNmi(pVCpu);
4468#endif
4469 vmxHCSetPendingXcptNmi(pVCpu);
4470 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4471 Log4Func(("NMI pending injection\n"));
4472
4473 /* We've injected the NMI, bail. */
4474 return VINF_SUCCESS;
4475 }
4476 else if (!fIsNestedGuest)
4477 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4478 }
4479
4480 /*
4481 * External interrupts (PIC/APIC).
4482 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
4483 * We cannot re-request the interrupt from the controller again.
4484 */
4485 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4486 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4487 {
4488 Assert(!DBGFIsStepping(pVCpu));
4489 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
4490 AssertRC(rc);
4491
4492 /*
4493 * We must not check EFLAGS directly when executing a nested-guest; use
4494 * CPUMIsGuestPhysIntrEnabled() instead, as EFLAGS.IF does not control the blocking of
4495 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
4496 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
4497 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
4498 *
4499 * See Intel spec. 25.4.1 "Event Blocking".
4500 */
4501 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
4502 {
4503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4504 if ( fIsNestedGuest
4505 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4506 {
4507 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
4508 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4509 return rcStrict;
4510 }
4511#endif
4512 uint8_t u8Interrupt;
4513 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
4514 if (RT_SUCCESS(rc))
4515 {
4516#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4517 if ( fIsNestedGuest
4518 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
4519 {
4520 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
4521 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
4522 return rcStrict;
4523 }
4524#endif
4525 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
4526 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
4527 }
4528 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4529 {
4530 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
4531
4532 if ( !fIsNestedGuest
4533 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
4534 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
4535 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
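 /* The TPR threshold is the interrupt's priority class (vector bits 7:4), so the guest
    takes a TPR-below-threshold VM-exit once it lowers its TPR far enough to unmask it. */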
4536
4537 /*
4538 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
4539 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
4540 * need to re-set this force-flag here.
4541 */
4542 }
4543 else
4544 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
4545
4546 /* We've injected the interrupt or taken necessary action, bail. */
4547 return VINF_SUCCESS;
4548 }
4549 if (!fIsNestedGuest)
4550 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4551 }
4552 }
4553 else if (!fIsNestedGuest)
4554 {
4555 /*
4556 * An event is being injected or we are in an interrupt shadow. Check if another event is
4557 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
4558 * the pending event.
4559 */
4560 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
4561 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
4562 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
4563 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4564 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
4565 }
4566 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
4567
4568 return VINF_SUCCESS;
4569}
4570
4571
4572/**
4573 * Injects any pending events into the guest if the guest is in a state to
4574 * receive them.
4575 *
4576 * @returns Strict VBox status code (i.e. informational status codes too).
4577 * @param pVCpu The cross context virtual CPU structure.
4578 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
4579 * @param fIntrState The VT-x guest-interruptibility state.
4580 * @param fStepping Whether we are single-stepping the guest using the
4581 * hypervisor debugger and should return
4582 * VINF_EM_DBG_STEPPED if the event was dispatched
4583 * directly.
4584 */
4585static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
4586{
4587 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
4588#ifndef IN_NEM_DARWIN
4589 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4590#endif
4591
4592#ifdef VBOX_STRICT
4593 /*
4594 * Verify guest-interruptibility state.
4595 *
4596 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
4597 * since injecting an event may modify the interruptibility state and we must thus always
4598 * use fIntrState.
4599 */
4600 {
4601 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
4602 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
4603 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
4604 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
4605 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
4606 Assert(!TRPMHasTrap(pVCpu));
4607 NOREF(fBlockMovSS); NOREF(fBlockSti);
4608 }
4609#endif
4610
4611 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4612 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
4613 {
4614 /*
4615 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
4616 * pending even while injecting an event and in this case, we want a VM-exit as soon as
4617 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
4618 *
4619 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
4620 */
4621 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
4622#ifdef VBOX_STRICT
4623 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4624 {
4625 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
4626 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4627 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4628 }
4629 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
4630 {
4631 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
4632 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
4633 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
4634 }
4635#endif
4636 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
4637 uIntType));
4638
4639 /*
4640 * Inject the event and get any changes to the guest-interruptibility state.
4641 *
4642 * The guest-interruptibility state may need to be updated if we inject the event
4643 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
4644 */
4645 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
4646 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
4647
4648 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4649 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
4650 else
4651 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
4652 }
4653
4654 /*
4655 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
4656 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
4657 */
4658 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4659 && !fIsNestedGuest)
4660 {
4661 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4662
4663 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
4664 {
4665 /*
4666 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
4667 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
4668 */
4669 Assert(!DBGFIsStepping(pVCpu));
4670 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
4671 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
4672 AssertRC(rc);
4673 }
4674 else
4675 {
4676 /*
4677 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
4678 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
4679 * we take care of this case in vmxHCExportSharedDebugState and also the case if
4680 * we use MTF, so just make sure it's called before executing guest-code.
4681 */
4682 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
4683 }
4684 }
4685 /* else: for nested-guests, this is currently handled while merging VMCS controls. */
4686
4687 /*
4688 * Finally, update the guest-interruptibility state.
4689 *
4690 * This is required for the real-on-v86 software interrupt injection, for
4691 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
4692 */
4693 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
4694 AssertRC(rc);
4695
4696 /*
4697 * There's no need to clear the VM-entry interruption-information field here if we're not
4698 * injecting anything. VT-x clears the valid bit on every VM-exit.
4699 *
4700 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
4701 */
4702
4703 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
4704 return rcStrict;
4705}
4706
4707
4708/**
4709 * Tries to determine what part of the guest-state VT-x has deemed as invalid
4710 * and update error record fields accordingly.
4711 *
4712 * @returns VMX_IGS_* error codes.
4713 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
4714 * wrong with the guest state.
4715 *
4716 * @param pVCpu The cross context virtual CPU structure.
4717 * @param pVmcsInfo The VMCS info. object.
4718 *
4719 * @remarks This function assumes our cache of the VMCS controls
4720 * are valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
4721 */
4722static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
4723{
4724#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
4725#define HMVMX_CHECK_BREAK(expr, err) do { \
4726 if (!(expr)) { uError = (err); break; } \
4727 } while (0)
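/* These macros are expanded inside the do { ... } while (0) block below: a failed check
   records the error code in uError and breaks out, skipping the remaining guest-state checks. */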
4728
4729 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4730 uint32_t uError = VMX_IGS_ERROR;
4731 uint32_t u32IntrState = 0;
4732#ifndef IN_NEM_DARWIN
4733 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4734 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
4735#else
4736 bool const fUnrestrictedGuest = true;
4737#endif
4738 do
4739 {
4740 int rc;
4741
4742 /*
4743 * Guest-interruptibility state.
4744 *
4745 * Read this first so that any check that fails prior to those that actually
4746 * require the guest-interruptibility state would still reflect the correct
4747 * VMCS value and avoids causing further confusion.
4748 */
4749 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
4750 AssertRC(rc);
4751
4752 uint32_t u32Val;
4753 uint64_t u64Val;
4754
4755 /*
4756 * CR0.
4757 */
4758 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4759 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
4760 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
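 /* Bits set in both fixed MSRs must be 1 in CR0 and bits clear in both must be 0; hence
    ANDing them yields the must-be-one mask and ORing them yields the may-be-one mask. */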
4761 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
4762 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
4763 if (fUnrestrictedGuest)
4764 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4765
4766 uint64_t u64GuestCr0;
4767 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
4768 AssertRC(rc);
4769 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
4770 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
4771 if ( !fUnrestrictedGuest
4772 && (u64GuestCr0 & X86_CR0_PG)
4773 && !(u64GuestCr0 & X86_CR0_PE))
4774 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
4775
4776 /*
4777 * CR4.
4778 */
4779 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
4780 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
4781 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
4782
4783 uint64_t u64GuestCr4;
4784 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
4785 AssertRC(rc);
4786 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
4787 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
4788
4789 /*
4790 * IA32_DEBUGCTL MSR.
4791 */
4792 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
4793 AssertRC(rc);
4794 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4795 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
4796 {
4797 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
4798 }
4799 uint64_t u64DebugCtlMsr = u64Val;
4800
4801#ifdef VBOX_STRICT
4802 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
4803 AssertRC(rc);
4804 Assert(u32Val == pVmcsInfo->u32EntryCtls);
4805#endif
4806 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4807
4808 /*
4809 * RIP and RFLAGS.
4810 */
4811 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
4812 AssertRC(rc);
4813 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
4814 if ( !fLongModeGuest
4815 || !pCtx->cs.Attr.n.u1Long)
4816 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
4817 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
4818 * must be identical if the "IA-32e mode guest" VM-entry
4819 * control is 1 and CS.L is 1. No check applies if the
4820 * CPU supports 64 linear-address bits. */
4821
4822 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
4823 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
4824 AssertRC(rc);
4825 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
4826 VMX_IGS_RFLAGS_RESERVED);
4827 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
4828 uint32_t const u32Eflags = u64Val;
4829
4830 if ( fLongModeGuest
4831 || ( fUnrestrictedGuest
4832 && !(u64GuestCr0 & X86_CR0_PE)))
4833 {
4834 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
4835 }
4836
4837 uint32_t u32EntryInfo;
4838 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
4839 AssertRC(rc);
4840 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
4841 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
4842
4843 /*
4844 * 64-bit checks.
4845 */
4846 if (fLongModeGuest)
4847 {
4848 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
4849 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
4850 }
4851
4852 if ( !fLongModeGuest
4853 && (u64GuestCr4 & X86_CR4_PCIDE))
4854 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
4855
4856 /** @todo CR3 field must be such that bits 63:52 and bits in the range
4857 * 51:32 beyond the processor's physical-address width are 0. */
4858
4859 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4860 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
4861 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
4862
4863#ifndef IN_NEM_DARWIN
4864 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
4865 AssertRC(rc);
4866 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
4867
4868 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
4869 AssertRC(rc);
4870 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
4871#endif
4872
4873 /*
4874 * PERF_GLOBAL MSR.
4875 */
4876 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
4877 {
4878 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
4879 AssertRC(rc);
4880 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
4881 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
4882 }
4883
4884 /*
4885 * PAT MSR.
4886 */
4887 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4888 {
4889 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
4890 AssertRC(rc);
4891 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
4892 for (unsigned i = 0; i < 8; i++)
4893 {
4894 uint8_t u8Val = (u64Val & 0xff);
4895 if ( u8Val != 0 /* UC */
4896 && u8Val != 1 /* WC */
4897 && u8Val != 4 /* WT */
4898 && u8Val != 5 /* WP */
4899 && u8Val != 6 /* WB */
4900 && u8Val != 7 /* UC- */)
4901 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
4902 u64Val >>= 8;
4903 }
4904 }
4905
4906 /*
4907 * EFER MSR.
4908 */
4909 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4910 {
4911 Assert(g_fHmVmxSupportsVmcsEfer);
4912 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
4913 AssertRC(rc);
4914 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
4915 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
4916 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
4917 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
4918 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
4919 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
4920 * iemVmxVmentryCheckGuestState(). */
4921 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4922 || !(u64GuestCr0 & X86_CR0_PG)
4923 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
4924 VMX_IGS_EFER_LMA_LME_MISMATCH);
4925 }
4926
4927 /*
4928 * Segment registers.
4929 */
4930 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
4931 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
4932 if (!(u32Eflags & X86_EFL_VM))
4933 {
4934 /* CS */
4935 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
4936 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
4937 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
4938 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4939 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4940 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
4941 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
4942 /* CS cannot be loaded with NULL in protected mode. */
4943 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
4944 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
4945 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4946 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
4947 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4948 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
4949 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
4950 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
4951 else
4952 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
4953
4954 /* SS */
4955 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4956 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
4957 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
4958 if ( !(pCtx->cr0 & X86_CR0_PE)
4959 || pCtx->cs.Attr.n.u4Type == 3)
4960 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
4961
4962 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4963 {
4964 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
4965 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
4966 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
4967 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
4968 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4969 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4970 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
4971 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
4972 }
4973
4974 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
4975 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4976 {
4977 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
4978 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
4979 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4980 || pCtx->ds.Attr.n.u4Type > 11
4981 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4982 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
4983 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
4984 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4985 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4986 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
4987 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
4988 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4989 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
4990 }
4991 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4992 {
4993 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
4994 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
4995 HMVMX_CHECK_BREAK( fUnrestrictedGuest
4996 || pCtx->es.Attr.n.u4Type > 11
4997 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
4998 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
4999 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
5000 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
5001 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5002 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
5003 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
5004 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5005 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
5006 }
5007 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5008 {
5009 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
5010 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
5011 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5012 || pCtx->fs.Attr.n.u4Type > 11
5013 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
5014 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
5015 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
5016 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5017 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5018 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
5019 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
5020 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5021 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
5022 }
5023 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5024 {
5025 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
5026 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
5027 HMVMX_CHECK_BREAK( fUnrestrictedGuest
5028 || pCtx->gs.Attr.n.u4Type > 11
5029 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
5030 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
5031 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
5032 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5033 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5034 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
5035 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
5036 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5037 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
5038 }
5039 /* 64-bit capable CPUs. */
5040 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5041 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5042 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5043 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5044 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5045 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5046 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5047 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5048 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5049 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5050 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5051 }
5052 else
5053 {
5054 /* V86 mode checks. */
5055 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5056 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5057 {
5058 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
5059 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
5060 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
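 /* 0xf3 = present, DPL=3, S=1, type 3 (read/write, accessed): the attributes
    virtual-8086 mode requires all segments to have. */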
5061 }
5062 else
5063 {
5064 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
5065 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
5066 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5067 }
5068
5069 /* CS */
5070 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
5071 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
5072 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
5073 /* SS */
5074 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
5075 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
5076 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
5077 /* DS */
5078 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
5079 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
5080 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
5081 /* ES */
5082 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
5083 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
5084 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
5085 /* FS */
5086 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
5087 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
5088 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
5089 /* GS */
5090 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
5091 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
5092 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
5093 /* 64-bit capable CPUs. */
5094 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
5095 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
5096 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
5097 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
5098 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
5099 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
5100 VMX_IGS_LONGMODE_SS_BASE_INVALID);
5101 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
5102 VMX_IGS_LONGMODE_DS_BASE_INVALID);
5103 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
5104 VMX_IGS_LONGMODE_ES_BASE_INVALID);
5105 }
5106
5107 /*
5108 * TR.
5109 */
5110 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
5111 /* 64-bit capable CPUs. */
5112 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
5113 if (fLongModeGuest)
5114 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
5115 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
5116 else
5117 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
5118 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
5119 VMX_IGS_TR_ATTR_TYPE_INVALID);
5120 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
5121 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
5122 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
5123 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
5124 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5125 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
5126 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
5127 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
5128
5129 /*
5130 * GDTR and IDTR (64-bit capable checks).
5131 */
5132 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
5133 AssertRC(rc);
5134 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
5135
5136 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
5137 AssertRC(rc);
5138 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
5139
5140 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
5141 AssertRC(rc);
5142 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5143
5144 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
5145 AssertRC(rc);
5146 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
5147
5148 /*
5149 * Guest Non-Register State.
5150 */
5151 /* Activity State. */
5152 uint32_t u32ActivityState;
5153 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
5154 AssertRC(rc);
5155 HMVMX_CHECK_BREAK( !u32ActivityState
5156 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
5157 VMX_IGS_ACTIVITY_STATE_INVALID);
5158 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
5159 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
5160
5161 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
5162 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5163 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
5164
5165 /** @todo Activity state and injecting interrupts. Left as a todo since we
5166 * currently don't use activity states but ACTIVE. */
5167
5168 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5169 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
5170
5171 /* Guest interruptibility-state. */
5172 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
5173 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5174 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5175 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
5176 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
5177 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5178 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
5179 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
5180 {
5181 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5182 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5183 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
5184 }
5185 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5186 {
5187 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
5188 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
5189 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
5190 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
5191 }
5192 /** @todo Assumes the processor is not in SMM. */
5193 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5194 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
5195 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
5196 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
5197 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
5198 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5199 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
5200 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
5201
5202 /* Pending debug exceptions. */
5203 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
5204 AssertRC(rc);
5205 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
5206 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
5207 u32Val = u64Val; /* For pending debug exceptions checks below. */
5208
5209 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5210 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5211 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5212 {
5213 if ( (u32Eflags & X86_EFL_TF)
5214 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5215 {
5216 /* Bit 14 is PendingDebug.BS. */
5217 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
5218 }
5219 if ( !(u32Eflags & X86_EFL_TF)
5220 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
5221 {
5222 /* Bit 14 is PendingDebug.BS. */
5223 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
5224 }
5225 }
5226
5227#ifndef IN_NEM_DARWIN
5228 /* VMCS link pointer. */
5229 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
5230 AssertRC(rc);
5231 if (u64Val != UINT64_C(0xffffffffffffffff))
5232 {
5233 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
5234 /** @todo Bits beyond the processor's physical-address width MBZ. */
5235 /** @todo SMM checks. */
5236 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
5237 Assert(pVmcsInfo->pvShadowVmcs);
5238 VMXVMCSREVID VmcsRevId;
5239 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
5240 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
5241 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
5242 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
5243 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
5244 }
5245
5246 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
5247 * not using nested paging? */
5248 if ( VM_IS_VMX_NESTED_PAGING(pVM)
5249 && !fLongModeGuest
5250 && CPUMIsGuestInPAEModeEx(pCtx))
5251 {
5252 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
5253 AssertRC(rc);
5254 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5255
5256 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
5257 AssertRC(rc);
5258 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5259
5260 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
5261 AssertRC(rc);
5262 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5263
5264 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
5265 AssertRC(rc);
5266 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
5267 }
5268#endif
5269
5270 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
5271 if (uError == VMX_IGS_ERROR)
5272 uError = VMX_IGS_REASON_NOT_FOUND;
5273 } while (0);
5274
5275 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
5276 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
5277 return uError;
5278
5279#undef HMVMX_ERROR_BREAK
5280#undef HMVMX_CHECK_BREAK
5281}
5282/** @} */
5283
5284
5285#ifndef HMVMX_USE_FUNCTION_TABLE
5286/**
5287 * Handles a guest VM-exit from hardware-assisted VMX execution.
5288 *
5289 * @returns Strict VBox status code (i.e. informational status codes too).
5290 * @param pVCpu The cross context virtual CPU structure.
5291 * @param pVmxTransient The VMX-transient structure.
5292 */
5293DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5294{
5295#ifdef DEBUG_ramshankar
5296# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
5297 do { \
5298 if (a_fSave != 0) \
5299 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
5300 VBOXSTRICTRC rcStrict = a_CallExpr; \
5301 if (a_fSave != 0) \
5302 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
5303 return rcStrict; \
5304 } while (0)
5305#else
5306# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
5307#endif
5308 uint32_t const uExitReason = pVmxTransient->uExitReason;
5309 switch (uExitReason)
5310 {
5311 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
5312 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
5313 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
5314 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
5315 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
5316 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
5317 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
5318 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
5319 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
5320 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
5321 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
5322 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
5323 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
5324 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
5325 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
5326 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
5327 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
5328 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
5329 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
5330 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
5331 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
5332 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
5333 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
5334 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
5335 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
5336 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
5337 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
5338 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
5339 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
5340 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
5341#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5342 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
5343 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
5344 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
5345 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
5346 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
5347 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
5348 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
5349 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
5350 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
5351 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
5352#else
5353 case VMX_EXIT_VMCLEAR:
5354 case VMX_EXIT_VMLAUNCH:
5355 case VMX_EXIT_VMPTRLD:
5356 case VMX_EXIT_VMPTRST:
5357 case VMX_EXIT_VMREAD:
5358 case VMX_EXIT_VMRESUME:
5359 case VMX_EXIT_VMWRITE:
5360 case VMX_EXIT_VMXOFF:
5361 case VMX_EXIT_VMXON:
5362 case VMX_EXIT_INVVPID:
5363 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5364#endif
5365#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT)
5366 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitInvept(pVCpu, pVmxTransient));
5367#else
5368 case VMX_EXIT_INVEPT: return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
5369#endif
5370
5371 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
5372 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
5373 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
5374
5375 case VMX_EXIT_INIT_SIGNAL:
5376 case VMX_EXIT_SIPI:
5377 case VMX_EXIT_IO_SMI:
5378 case VMX_EXIT_SMI:
5379 case VMX_EXIT_ERR_MSR_LOAD:
5380 case VMX_EXIT_ERR_MACHINE_CHECK:
5381 case VMX_EXIT_PML_FULL:
5382 case VMX_EXIT_VIRTUALIZED_EOI:
5383 case VMX_EXIT_GDTR_IDTR_ACCESS:
5384 case VMX_EXIT_LDTR_TR_ACCESS:
5385 case VMX_EXIT_APIC_WRITE:
5386 case VMX_EXIT_RDRAND:
5387 case VMX_EXIT_RSM:
5388 case VMX_EXIT_VMFUNC:
5389 case VMX_EXIT_ENCLS:
5390 case VMX_EXIT_RDSEED:
5391 case VMX_EXIT_XSAVES:
5392 case VMX_EXIT_XRSTORS:
5393 case VMX_EXIT_UMWAIT:
5394 case VMX_EXIT_TPAUSE:
5395 case VMX_EXIT_LOADIWKEY:
5396 default:
5397 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5398 }
5399#undef VMEXIT_CALL_RET
5400}
5401#endif /* !HMVMX_USE_FUNCTION_TABLE */
5402
5403
5404#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5405/**
5406 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
5407 *
5408 * @returns Strict VBox status code (i.e. informational status codes too).
5409 * @param pVCpu The cross context virtual CPU structure.
5410 * @param pVmxTransient The VMX-transient structure.
5411 */
5412DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5413{
5414 uint32_t const uExitReason = pVmxTransient->uExitReason;
5415 switch (uExitReason)
5416 {
5417 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfigNested(pVCpu, pVmxTransient);
5418 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolationNested(pVCpu, pVmxTransient);
5419 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
5420 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
5421 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
5422
5423 /*
5424 * We shouldn't direct host physical interrupts to the nested-guest.
5425 */
5426 case VMX_EXIT_EXT_INT:
5427 return vmxHCExitExtInt(pVCpu, pVmxTransient);
5428
5429 /*
5430 * Instructions that cause VM-exits unconditionally or the condition is
5431 * always taken solely from the nested hypervisor (meaning if the VM-exit
5432 * happens, it's guaranteed to be a nested-guest VM-exit).
5433 *
5434 * - Provides VM-exit instruction length ONLY.
5435 */
5436 case VMX_EXIT_CPUID: /* Unconditional. */
5437 case VMX_EXIT_VMCALL:
5438 case VMX_EXIT_GETSEC:
5439 case VMX_EXIT_INVD:
5440 case VMX_EXIT_XSETBV:
5441 case VMX_EXIT_VMLAUNCH:
5442 case VMX_EXIT_VMRESUME:
5443 case VMX_EXIT_VMXOFF:
5444 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
5445 case VMX_EXIT_VMFUNC:
5446 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
5447
5448 /*
5449 * Instructions that cause VM-exits unconditionally or the condition is
5450 * always taken solely from the nested hypervisor (meaning if the VM-exit
5451 * happens, it's guaranteed to be a nested-guest VM-exit).
5452 *
5453 * - Provides VM-exit instruction length.
5454 * - Provides VM-exit information.
5455 * - Optionally provides Exit qualification.
5456 *
5457 * Since Exit qualification is 0 for all VM-exits where it is not
5458 * applicable, reading and passing it to the guest should produce
5459 * defined behavior.
5460 *
5461 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
5462 */
5463 case VMX_EXIT_INVEPT: /* Unconditional. */
5464 case VMX_EXIT_INVVPID:
5465 case VMX_EXIT_VMCLEAR:
5466 case VMX_EXIT_VMPTRLD:
5467 case VMX_EXIT_VMPTRST:
5468 case VMX_EXIT_VMXON:
5469 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
5470 case VMX_EXIT_LDTR_TR_ACCESS:
5471 case VMX_EXIT_RDRAND:
5472 case VMX_EXIT_RDSEED:
5473 case VMX_EXIT_XSAVES:
5474 case VMX_EXIT_XRSTORS:
5475 case VMX_EXIT_UMWAIT:
5476 case VMX_EXIT_TPAUSE:
5477 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
5478
5479 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
5480 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
5481 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
5482 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
5483 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
5484 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
5485 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
5486 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
5487 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
5488 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
5489 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
5490 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
5491 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
5492 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
5493 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
5494 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
5495 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
5496 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
5497 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
5498
5499 case VMX_EXIT_PREEMPT_TIMER:
5500 {
5501 /** @todo NSTVMX: Preempt timer. */
5502 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
5503 }
5504
5505 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
5506 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
5507
5508 case VMX_EXIT_VMREAD:
5509 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
5510
5511 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
5512 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
5513
5514 case VMX_EXIT_INIT_SIGNAL:
5515 case VMX_EXIT_SIPI:
5516 case VMX_EXIT_IO_SMI:
5517 case VMX_EXIT_SMI:
5518 case VMX_EXIT_ERR_MSR_LOAD:
5519 case VMX_EXIT_ERR_MACHINE_CHECK:
5520 case VMX_EXIT_PML_FULL:
5521 case VMX_EXIT_RSM:
5522 default:
5523 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
5524 }
5525}
5526#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5527
5528
5529/** @name VM-exit helpers.
5530 * @{
5531 */
5532/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5533/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5534/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5535
5536/** Macro for VM-exits called unexpectedly. */
5537#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
5538 do { \
5539 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
5540 return VERR_VMX_UNEXPECTED_EXIT; \
5541 } while (0)
5542
5543#ifdef VBOX_STRICT
5544# ifndef IN_NEM_DARWIN
5545/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5546# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
5547 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5548
5549# define HMVMX_ASSERT_PREEMPT_CPUID() \
5550 do { \
5551 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5552 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5553 } while (0)
5554
5555# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5556 do { \
5557 AssertPtr((a_pVCpu)); \
5558 AssertPtr((a_pVmxTransient)); \
5559 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5560 Assert((a_pVmxTransient)->pVmcsInfo); \
5561 Assert(ASMIntAreEnabled()); \
5562 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5563 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
5564 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5565 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
5566 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
5567 HMVMX_ASSERT_PREEMPT_CPUID(); \
5568 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5569 } while (0)
5570# else
5571# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
5572# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
5573# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5574 do { \
5575 AssertPtr((a_pVCpu)); \
5576 AssertPtr((a_pVmxTransient)); \
5577 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
5578 Assert((a_pVmxTransient)->pVmcsInfo); \
5579 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
5580 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5581 } while (0)
5582# endif
5583
5584# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5585 do { \
5586 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
5587 Assert((a_pVmxTransient)->fIsNestedGuest); \
5588 } while (0)
5589
5590# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5591 do { \
5592 Log4Func(("\n")); \
5593 } while (0)
5594#else
5595# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5596 do { \
5597 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
5598 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
5599 } while (0)
5600
5601# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
5602 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
5603
5604# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
5605#endif
5606
5607#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5608/** Macro that does the necessary privilege checks and handles intercepted VM-exits for
5609 * guests that attempted to execute a VMX instruction. */
5610# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
5611 do \
5612 { \
5613 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
5614 if (rcStrictTmp == VINF_SUCCESS) \
5615 { /* likely */ } \
5616 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5617 { \
5618 Assert((a_pVCpu)->hm.s.Event.fPending); \
5619 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
5620 return VINF_SUCCESS; \
5621 } \
5622 else \
5623 { \
5624 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
5625 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
5626 } \
5627 } while (0)
5628
5629/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
5630# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
5631 do \
5632 { \
5633 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
5634 (a_pGCPtrEffAddr)); \
5635 if (rcStrictTmp == VINF_SUCCESS) \
5636 { /* likely */ } \
5637 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
5638 { \
5639 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
5640 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
5641 NOREF(uXcptTmp); \
5642 return VINF_SUCCESS; \
5643 } \
5644 else \
5645 { \
5646 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
5647 return rcStrictTmp; \
5648 } \
5649 } while (0)
5650#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5651
5652
5653/**
5654 * Advances the guest RIP by the specified number of bytes.
5655 *
5656 * @param pVCpu The cross context virtual CPU structure.
5657 * @param cbInstr Number of bytes to advance the RIP by.
5658 *
5659 * @remarks No-long-jump zone!!!
5660 */
5661DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
5662{
5663 /* Advance the RIP. */
5664 pVCpu->cpum.GstCtx.rip += cbInstr;
5665 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
5666
5667 /* Update interrupt inhibition. */
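 /* The STI/MOV-SS shadow only covers the instruction immediately following it, so once
    RIP no longer matches the recorded inhibition PC the force-flag can be cleared. */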
5668 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5669 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
5670 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5671}
5672
5673
5674/**
5675 * Advances the guest RIP after reading it from the VMCS.
5676 *
5677 * @returns VBox status code, no informational status codes.
5678 * @param pVCpu The cross context virtual CPU structure.
5679 * @param pVmxTransient The VMX-transient structure.
5680 *
5681 * @remarks No-long-jump zone!!!
5682 */
5683static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5684{
5685 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
5686 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5687 AssertRCReturn(rc, rc);
5688
5689 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
5690 return VINF_SUCCESS;
5691}
5692
5693
5694/**
5695 * Handle a condition that occurred while delivering an event through the guest or
5696 * nested-guest IDT.
5697 *
5698 * @returns Strict VBox status code (i.e. informational status codes too).
5699 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5700 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5701 * to continue execution of the guest which will deliver the \#DF.
5702 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5703 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5704 *
5705 * @param pVCpu The cross context virtual CPU structure.
5706 * @param pVmxTransient The VMX-transient structure.
5707 *
5708 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
5709 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
5710 * is due to an EPT violation, PML full or SPP-related event.
5711 *
5712 * @remarks No-long-jump zone!!!
5713 */
5714static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
5715{
5716 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5717 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
5718 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5719 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5720 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5721 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
5722
5723 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5724 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5725 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
5726 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
5727 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
5728 {
5729 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
5730 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
5731
5732 /*
5733 * If the event was a software interrupt (generated with INT n) or a software exception
5734 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5735 * can handle the VM-exit and continue guest execution which will re-execute the
5736 * instruction rather than re-injecting the exception, as that can cause premature
5737 * trips to ring-3 before injection and involve TRPM which currently has no way of
5738 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5739 * the problem).
5740 */
5741 IEMXCPTRAISE enmRaise;
5742 IEMXCPTRAISEINFO fRaiseInfo;
5743 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5744 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5745 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5746 {
5747 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5748 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5749 }
5750 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
5751 {
5752 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
5753 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
5754 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
5755
5756 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
5757 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
5758
5759 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5760
5761 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
5762 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5763 {
5764 pVmxTransient->fVectoringPF = true;
5765 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5766 }
5767 }
5768 else
5769 {
5770 /*
5771 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5772 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5773 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5774 */
5775 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5776 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5777 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5778 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5779 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5780 }
5781
5782 /*
5783 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5784 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5785 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5786 * subsequent VM-entry would fail, see @bugref{7445}.
5787 *
5788 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
5789 */
5790 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5791 && enmRaise == IEMXCPTRAISE_PREV_EVENT
5792 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5793 && CPUMIsGuestNmiBlocking(pVCpu))
5794 {
5795 CPUMSetGuestNmiBlocking(pVCpu, false);
5796 }
5797
5798 switch (enmRaise)
5799 {
5800 case IEMXCPTRAISE_CURRENT_XCPT:
5801 {
5802 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
5803 Assert(rcStrict == VINF_SUCCESS);
5804 break;
5805 }
5806
5807 case IEMXCPTRAISE_PREV_EVENT:
5808 {
5809 uint32_t u32ErrCode;
5810 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
5811 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5812 else
5813 u32ErrCode = 0;
5814
5815 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
5816 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
5817 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
5818 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
5819
5820 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5821 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
5822 Assert(rcStrict == VINF_SUCCESS);
5823 break;
5824 }
5825
5826 case IEMXCPTRAISE_REEXEC_INSTR:
5827 Assert(rcStrict == VINF_SUCCESS);
5828 break;
5829
5830 case IEMXCPTRAISE_DOUBLE_FAULT:
5831 {
5832 /*
5833 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5834 * second #PF as a guest #PF (and not a shadow #PF) and it needs to be converted into a #DF.
5835 */
5836 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5837 {
5838 pVmxTransient->fVectoringDoublePF = true;
5839 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5840 pVCpu->cpum.GstCtx.cr2));
5841 rcStrict = VINF_SUCCESS;
5842 }
5843 else
5844 {
5845 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
5846 vmxHCSetPendingXcptDF(pVCpu);
5847 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
5848 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5849 rcStrict = VINF_HM_DOUBLE_FAULT;
5850 }
5851 break;
5852 }
5853
5854 case IEMXCPTRAISE_TRIPLE_FAULT:
5855 {
5856 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
5857 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
5858 rcStrict = VINF_EM_RESET;
5859 break;
5860 }
5861
5862 case IEMXCPTRAISE_CPU_HANG:
5863 {
5864 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
5865 rcStrict = VERR_EM_GUEST_CPU_HANG;
5866 break;
5867 }
5868
5869 default:
5870 {
5871 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
5872 rcStrict = VERR_VMX_IPE_2;
5873 break;
5874 }
5875 }
5876 }
5877 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5878 && !CPUMIsGuestNmiBlocking(pVCpu))
5879 {
5880 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
5881 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
5882 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
5883 {
5884 /*
5885 * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
5886 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5887 * that virtual NMIs remain blocked until the IRET execution is completed.
5888 *
5889 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
5890 */
5891 CPUMSetGuestNmiBlocking(pVCpu, true);
5892 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5893 }
5894 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
5895 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
5896 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
5897 {
5898 /*
5899 * Execution of IRET caused an EPT violation, page-modification log-full event or
5900 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
5901 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
5902 * that virtual NMIs remain blocked until the IRET execution is completed.
5903 *
5904 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
5905 */
5906 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
5907 {
5908 CPUMSetGuestNmiBlocking(pVCpu, true);
5909 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
5910 }
5911 }
5912 }
5913
5914 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5915 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5916 return rcStrict;
5917}
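/*
 * Illustration only: a caller-side sketch of how the status codes returned by
 * vmxHCCheckExitDueToEventDelivery() are typically consumed. This mirrors what
 * vmxHCExitXcpt() further down does; it is not additional code used by this file.
 *
 * @code
 *   VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
 *   if (rcStrict == VINF_SUCCESS)
 *   {
 *       // Continue handling the VM-exit (an event to re-inject may already be pending).
 *   }
 *   else if (rcStrict == VINF_HM_DOUBLE_FAULT)
 *   {
 *       // A #DF is now pending; skip further handling and let it be injected on VM-entry.
 *       rcStrict = VINF_SUCCESS;
 *   }
 *   // else: VINF_EM_RESET (triple fault) or VERR_EM_GUEST_CPU_HANG is propagated to the caller.
 *   return rcStrict;
 * @endcode
 */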
5918
5919
5920#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5921/**
5922 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
5923 * guest attempting to execute a VMX instruction.
5924 *
5925 * @returns Strict VBox status code (i.e. informational status codes too).
5926 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5927 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
5928 *
5929 * @param pVCpu The cross context virtual CPU structure.
5930 * @param uExitReason The VM-exit reason.
5931 *
5932 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
5933 * @remarks No-long-jump zone!!!
5934 */
5935static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
5936{
5937 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
5938 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
5939
5940 /*
5941 * The physical CPU would have already checked the CPU mode/code segment.
5942 * We shall just assert here for paranoia.
5943 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
5944 */
5945 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
5946 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5947 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
5948
5949 if (uExitReason == VMX_EXIT_VMXON)
5950 {
5951 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5952
5953 /*
5954 * We check CR4.VMXE because it is required to be always set while in VMX operation
5955 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
5956 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
5957 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
5958 */
5959 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
5960 {
5961 Log4Func(("CR4.VMXE is not set -> #UD\n"));
5962 vmxHCSetPendingXcptUD(pVCpu);
5963 return VINF_HM_PENDING_XCPT;
5964 }
5965 }
5966 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
5967 {
5968 /*
5969 * The guest has not entered VMX operation but attempted to execute a VMX instruction
5970 * (other than VMXON), so we need to raise a #UD.
5971 */
5972 Log4Func(("Not in VMX root mode -> #UD\n"));
5973 vmxHCSetPendingXcptUD(pVCpu);
5974 return VINF_HM_PENDING_XCPT;
5975 }
5976
5977 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
5978 return VINF_SUCCESS;
5979}
5980
5981
5982/**
5983 * Decodes the memory operand of an instruction that caused a VM-exit.
5984 *
5985 * The Exit qualification field provides the displacement field for memory
5986 * operand instructions, if any.
5987 *
5988 * @returns Strict VBox status code (i.e. informational status codes too).
5989 * @retval VINF_SUCCESS if the operand was successfully decoded.
5990 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
5991 * operand.
5992 * @param pVCpu The cross context virtual CPU structure.
5993 * @param uExitInstrInfo The VM-exit instruction information field.
5994 * @param enmMemAccess The memory operand's access type (read or write).
5995 * @param GCPtrDisp The instruction displacement field, if any. For
5996 * RIP-relative addressing pass RIP + displacement here.
5997 * @param pGCPtrMem Where to store the effective destination memory address.
5998 *
5999 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6000 * virtual-8086 mode and hence skips those checks while verifying if the
6001 * segment is valid.
6002 */
6003static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6004 PRTGCPTR pGCPtrMem)
6005{
6006 Assert(pGCPtrMem);
6007 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6008 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6009 | CPUMCTX_EXTRN_CR0);
6010
6011 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6012 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6013 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6014
6015 VMXEXITINSTRINFO ExitInstrInfo;
6016 ExitInstrInfo.u = uExitInstrInfo;
6017 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6018 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6019 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6020 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6021 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6022 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6023 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6024 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6025 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6026
6027 /*
6028 * Validate instruction information.
6029 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6030 */
6031 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6032 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6033 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6034 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6035 AssertLogRelMsgReturn(fIsMemOperand,
6036 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6037
6038 /*
6039 * Compute the complete effective address.
6040 *
6041 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6042 * See AMD spec. 4.5.2 "Segment Registers".
6043 */
6044 RTGCPTR GCPtrMem = GCPtrDisp;
6045 if (fBaseRegValid)
6046 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6047 if (fIdxRegValid)
6048 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6049
6050 RTGCPTR const GCPtrOff = GCPtrMem;
6051 if ( !fIsLongMode
6052 || iSegReg >= X86_SREG_FS)
6053 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6054 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
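 /* Worked example (hypothetical values, 32-bit code): with a displacement of 8, a base
    register holding 0x2000, an index register holding 0x30 scaled by 4 and DS.base of
    0x10000, the offset is 8 + 0x2000 + (0x30 << 2) = 0x20c8, and the address becomes
    0x10000 + 0x20c8 = 0x120c8 after adding the segment base and applying the 32-bit
    address-size mask. */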
6055
6056 /*
6057 * Validate effective address.
6058 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6059 */
6060 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6061 Assert(cbAccess > 0);
6062 if (fIsLongMode)
6063 {
6064 if (X86_IS_CANONICAL(GCPtrMem))
6065 {
6066 *pGCPtrMem = GCPtrMem;
6067 return VINF_SUCCESS;
6068 }
6069
6070 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6071 * "Data Limit Checks in 64-bit Mode". */
6072 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6073 vmxHCSetPendingXcptGP(pVCpu, 0);
6074 return VINF_HM_PENDING_XCPT;
6075 }
6076
6077 /*
6078 * This is a watered down version of iemMemApplySegment().
6079 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6080 * and segment CPL/DPL checks are skipped.
6081 */
6082 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6083 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6084 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6085
6086 /* Check if the segment is present and usable. */
6087 if ( pSel->Attr.n.u1Present
6088 && !pSel->Attr.n.u1Unusable)
6089 {
6090 Assert(pSel->Attr.n.u1DescType);
6091 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6092 {
6093 /* Check permissions for the data segment. */
6094 if ( enmMemAccess == VMXMEMACCESS_WRITE
6095 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6096 {
6097 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6098 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
6099 return VINF_HM_PENDING_XCPT;
6100 }
6101
6102 /* Check limits if it's a normal data segment. */
6103 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6104 {
6105 if ( GCPtrFirst32 > pSel->u32Limit
6106 || GCPtrLast32 > pSel->u32Limit)
6107 {
6108 Log4Func(("Data segment limit exceeded. "
6109 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6110 GCPtrLast32, pSel->u32Limit));
6111 if (iSegReg == X86_SREG_SS)
6112 vmxHCSetPendingXcptSS(pVCpu, 0);
6113 else
6114 vmxHCSetPendingXcptGP(pVCpu, 0);
6115 return VINF_HM_PENDING_XCPT;
6116 }
6117 }
6118 else
6119 {
6120 /* Check limits if it's an expand-down data segment.
6121 Note! The upper boundary is defined by the B bit, not the G bit! */
6122 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6123 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6124 {
6125 Log4Func(("Expand-down data segment limit exceeded. "
6126 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6127 GCPtrLast32, pSel->u32Limit));
6128 if (iSegReg == X86_SREG_SS)
6129 vmxHCSetPendingXcptSS(pVCpu, 0);
6130 else
6131 vmxHCSetPendingXcptGP(pVCpu, 0);
6132 return VINF_HM_PENDING_XCPT;
6133 }
6134 }
6135 }
6136 else
6137 {
6138 /* Check permissions for the code segment. */
6139 if ( enmMemAccess == VMXMEMACCESS_WRITE
6140 || ( enmMemAccess == VMXMEMACCESS_READ
6141 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6142 {
6143 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6144 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6145 vmxHCSetPendingXcptGP(pVCpu, 0);
6146 return VINF_HM_PENDING_XCPT;
6147 }
6148
6149 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6150 if ( GCPtrFirst32 > pSel->u32Limit
6151 || GCPtrLast32 > pSel->u32Limit)
6152 {
6153 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6154 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6155 if (iSegReg == X86_SREG_SS)
6156 vmxHCSetPendingXcptSS(pVCpu, 0);
6157 else
6158 vmxHCSetPendingXcptGP(pVCpu, 0);
6159 return VINF_HM_PENDING_XCPT;
6160 }
6161 }
6162 }
6163 else
6164 {
6165 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6166 vmxHCSetPendingXcptGP(pVCpu, 0);
6167 return VINF_HM_PENDING_XCPT;
6168 }
6169
6170 *pGCPtrMem = GCPtrMem;
6171 return VINF_SUCCESS;
6172}
6173#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6174
6175
6176/**
6177 * VM-exit helper for LMSW.
6178 */
6179static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
6180{
6181 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6182 AssertRCReturn(rc, rc);
6183
6184 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
6185 AssertMsg( rcStrict == VINF_SUCCESS
6186 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6187
6188 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6189 if (rcStrict == VINF_IEM_RAISED_XCPT)
6190 {
6191 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6192 rcStrict = VINF_SUCCESS;
6193 }
6194
6195 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
6196 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6197 return rcStrict;
6198}
6199
6200
6201/**
6202 * VM-exit helper for CLTS.
6203 */
6204static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
6205{
6206 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6207 AssertRCReturn(rc, rc);
6208
6209 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
6210 AssertMsg( rcStrict == VINF_SUCCESS
6211 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6212
6213 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
6214 if (rcStrict == VINF_IEM_RAISED_XCPT)
6215 {
6216 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6217 rcStrict = VINF_SUCCESS;
6218 }
6219
6220 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
6221 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6222 return rcStrict;
6223}
6224
6225
6226/**
6227 * VM-exit helper for MOV from CRx (CRx read).
6228 */
6229static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6230{
6231 Assert(iCrReg < 16);
6232 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
6233
6234 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6235 AssertRCReturn(rc, rc);
6236
6237 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6238 AssertMsg( rcStrict == VINF_SUCCESS
6239 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6240
6241 if (iGReg == X86_GREG_xSP)
6242 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
6243 else
6244 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
6245#ifdef VBOX_WITH_STATISTICS
6246 switch (iCrReg)
6247 {
6248 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
6249 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
6250 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
6251 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
6252 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
6253 }
6254#endif
6255 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
6256 return rcStrict;
6257}
6258
6259
6260/**
6261 * VM-exit helper for MOV to CRx (CRx write).
6262 */
6263static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
6264{
6265 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6266
6267 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6268 AssertMsg( rcStrict == VINF_SUCCESS
6269 || rcStrict == VINF_IEM_RAISED_XCPT
6270 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6271
6272 switch (iCrReg)
6273 {
6274 case 0:
6275 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
6276 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
6277 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
6278 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
6279 break;
6280
6281 case 2:
6282 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
6283 /* Nothing to do here; CR2 is not part of the VMCS. */
6284 break;
6285
6286 case 3:
6287 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
6288 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
6289 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
6290 break;
6291
6292 case 4:
6293 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
6294 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
6295#ifndef IN_NEM_DARWIN
6296 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
6297 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
6298#else
6299 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
6300#endif
6301 break;
6302
6303 case 8:
6304 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6305 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
6306 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
6307 break;
6308
6309 default:
6310 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
6311 break;
6312 }
6313
6314 if (rcStrict == VINF_IEM_RAISED_XCPT)
6315 {
6316 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6317 rcStrict = VINF_SUCCESS;
6318 }
6319 return rcStrict;
6320}
6321
6322
6323/**
6324 * VM-exit exception handler for \#PF (Page-fault exception).
6325 *
6326 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6327 */
6328static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6329{
6330 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6331 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6332
6333#ifndef IN_NEM_DARWIN
6334 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6335 if (!VM_IS_VMX_NESTED_PAGING(pVM))
6336 { /* likely */ }
6337 else
6338#endif
6339 {
6340#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
6341 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
6342#endif
6343 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
6344 if (!pVmxTransient->fVectoringDoublePF)
6345 {
6346 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6347 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
6348 }
6349 else
6350 {
6351 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6352 Assert(!pVmxTransient->fIsNestedGuest);
6353 vmxHCSetPendingXcptDF(pVCpu);
6354 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
6355 }
6356 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6357 return VINF_SUCCESS;
6358 }
6359
6360 Assert(!pVmxTransient->fIsNestedGuest);
6361
6362 /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
6363 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
6364 if (pVmxTransient->fVectoringPF)
6365 {
6366 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6367 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6368 }
6369
6370 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6371 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6372 AssertRCReturn(rc, rc);
6373
6374 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
6375 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
6376
6377 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
6378 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
6379
6380 Log4Func(("#PF: rc=%Rrc\n", rc));
6381 if (rc == VINF_SUCCESS)
6382 {
6383 /*
6384 * This is typically a shadow page table sync or an MMIO instruction. But we may have
6385 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
6386 */
6387 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6388 TRPMResetTrap(pVCpu);
6389 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
6390 return rc;
6391 }
6392
6393 if (rc == VINF_EM_RAW_GUEST_TRAP)
6394 {
6395 if (!pVmxTransient->fVectoringDoublePF)
6396 {
6397 /* It's a guest page fault and needs to be reflected to the guest. */
6398 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
6399 TRPMResetTrap(pVCpu);
6400 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
6401 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
6402 uGstErrorCode, pVmxTransient->uExitQual);
6403 }
6404 else
6405 {
6406 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
6407 TRPMResetTrap(pVCpu);
6408 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
6409 vmxHCSetPendingXcptDF(pVCpu);
6410 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
6411 }
6412
6413 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
6414 return VINF_SUCCESS;
6415 }
6416
6417 TRPMResetTrap(pVCpu);
6418 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
6419 return rc;
6420}
6421
6422
6423/**
6424 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
6425 *
6426 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6427 */
6428static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6429{
6430 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6431 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
6432
6433 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
6434 AssertRCReturn(rc, rc);
6435
6436 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
6437 {
6438 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
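 /* (This is the legacy external FPU-error reporting path: with CR0.NE clear, real hardware
     asserts FERR# and the interrupt controller delivers IRQ 13 to the guest instead of #MF.) */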
6439 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
6440
6441 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
6442 * provides the VM-exit instruction length. If this causes problems later,
6443 * disassemble the instruction like it's done on AMD-V. */
6444 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6445 AssertRCReturn(rc2, rc2);
6446 return rc;
6447 }
6448
6449 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
6450 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6451 return VINF_SUCCESS;
6452}
6453
6454
6455/**
6456 * VM-exit exception handler for \#BP (Breakpoint exception).
6457 *
6458 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6459 */
6460static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6461{
6462 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6463 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
6464
6465 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6466 AssertRCReturn(rc, rc);
6467
6468 VBOXSTRICTRC rcStrict;
6469 if (!pVmxTransient->fIsNestedGuest)
6470 rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
6471 else
6472 rcStrict = VINF_EM_RAW_GUEST_TRAP;
6473
6474 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
6475 {
6476 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6477 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6478 rcStrict = VINF_SUCCESS;
6479 }
6480
6481 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
6482 return rcStrict;
6483}
6484
6485
6486/**
6487 * VM-exit exception handler for \#AC (Alignment-check exception).
6488 *
6489 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6490 */
6491static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6492{
6493 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6494
6495 /*
6496 * Detect #ACs caused by the host having enabled split-lock detection.
6497 * Emulate such instructions.
6498 */
6499 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
6500 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
6501 AssertRCReturn(rc, rc);
6502 /** @todo detect split lock in cpu feature? */
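 /* Background: with split-lock detection enabled, the host CPU raises #AC(0) for any LOCK'ed
    access whose operand straddles a cache-line boundary, at any CPL and irrespective of
    CR0.AM/EFLAGS.AC; the three checks below tell such #ACs apart from genuine 486-style
    alignment-check exceptions, which require CR0.AM, CPL 3 and EFLAGS.AC. */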
6503 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
6504 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6505 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
6506 || CPUMGetGuestCPL(pVCpu) != 3
6507 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
6508 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
6509 {
6510 /*
6511 * Check for debug/trace events and import state accordingly.
6512 */
6513 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
6514 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6515 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
6516#ifndef IN_NEM_DARWIN
6517 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
6518#endif
6519 )
6520 {
6521 if (pVM->cCpus == 1)
6522 {
6523#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6524 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
6525#else
6526 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6527#endif
6528 AssertRCReturn(rc, rc);
6529 }
6530 }
6531 else
6532 {
6533 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6534 AssertRCReturn(rc, rc);
6535
6536 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
6537
6538 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
6539 {
6540 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
6541 if (rcStrict != VINF_SUCCESS)
6542 return rcStrict;
6543 }
6544 }
6545
6546 /*
6547 * Emulate the instruction.
6548 *
6549 * We have to ignore the LOCK prefix here as we must not retrigger the
6550 * detection on the host. This isn't all that satisfactory, though...
6551 */
6552 if (pVM->cCpus == 1)
6553 {
6554 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
6555 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6556
6557 /** @todo For SMP configs we should do a rendezvous here. */
6558 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
6559 if (rcStrict == VINF_SUCCESS)
6560#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
6561 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
6562 HM_CHANGED_GUEST_RIP
6563 | HM_CHANGED_GUEST_RFLAGS
6564 | HM_CHANGED_GUEST_GPRS_MASK
6565 | HM_CHANGED_GUEST_CS
6566 | HM_CHANGED_GUEST_SS);
6567#else
6568 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6569#endif
6570 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6571 {
6572 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6573 rcStrict = VINF_SUCCESS;
6574 }
6575 return rcStrict;
6576 }
6577 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
6578 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
6579 return VINF_EM_EMULATE_SPLIT_LOCK;
6580 }
6581
6582 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
6583 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6584 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
6585
6586 /* Re-inject it. We'll detect any nesting before getting here. */
6587 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6588 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6589 return VINF_SUCCESS;
6590}
6591
6592
6593/**
6594 * VM-exit exception handler for \#DB (Debug exception).
6595 *
6596 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6597 */
6598static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6599{
6600 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6601 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
6602
6603 /*
6604 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
6605 */
6606 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
6607
6608 /* Refer to Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
6609 uint64_t const uDR6 = X86_DR6_INIT_VAL
6610 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
6611 | X86_DR6_BD | X86_DR6_BS));
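 /* The OR works directly because the exit qualification uses the same bit positions as DR6:
    bits 3:0 are B3..B0, bit 13 is BD and bit 14 is BS (Intel spec. Table 27-1). */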
6612
6613 int rc;
6614 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6615 if (!pVmxTransient->fIsNestedGuest)
6616 {
6617 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6618
6619 /*
6620 * Prevents stepping twice over the same instruction when the guest is stepping using
6621 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
6622 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
6623 */
6624 if ( rc == VINF_EM_DBG_STEPPED
6625 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
6626 {
6627 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
6628 rc = VINF_EM_RAW_GUEST_TRAP;
6629 }
6630 }
6631 else
6632 rc = VINF_EM_RAW_GUEST_TRAP;
6633 Log6Func(("rc=%Rrc\n", rc));
6634 if (rc == VINF_EM_RAW_GUEST_TRAP)
6635 {
6636 /*
6637 * The exception was for the guest. Update DR6, DR7.GD and
6638 * IA32_DEBUGCTL.LBR before forwarding it.
6639 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
6640 */
6641#ifndef IN_NEM_DARWIN
6642 VMMRZCallRing3Disable(pVCpu);
6643 HM_DISABLE_PREEMPT(pVCpu);
6644
6645 pCtx->dr[6] &= ~X86_DR6_B_MASK;
6646 pCtx->dr[6] |= uDR6;
6647 if (CPUMIsGuestDebugStateActive(pVCpu))
6648 ASMSetDR6(pCtx->dr[6]);
6649
6650 HM_RESTORE_PREEMPT();
6651 VMMRZCallRing3Enable(pVCpu);
6652#else
6653 /** @todo */
6654#endif
6655
6656 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
6657 AssertRCReturn(rc, rc);
6658
6659 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
6660 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
6661
6662 /* Paranoia. */
6663 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
6664 pCtx->dr[7] |= X86_DR7_RA1_MASK;
6665
6666 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
6667 AssertRC(rc);
6668
6669 /*
6670 * Raise #DB in the guest.
6671 *
6672 * It is important to reflect exactly what the VM-exit gave us (preserving the
6673 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
6674 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
6675 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
6676 *
6677 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as part of
6678 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6679 */
6680 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6681 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6682 return VINF_SUCCESS;
6683 }
6684
6685 /*
6686 * Not a guest trap, must be a hypervisor related debug event then.
6687 * Update DR6 in case someone is interested in it.
6688 */
6689 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
6690 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
6691 CPUMSetHyperDR6(pVCpu, uDR6);
6692
6693 return rc;
6694}
6695
6696
6697/**
6698 * Hacks its way around the lovely mesa driver's backdoor accesses.
6699 *
6700 * @sa hmR0SvmHandleMesaDrvGp.
6701 */
6702static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6703{
6704 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
6705 RT_NOREF(pCtx);
6706
6707 /* For now we'll just skip the instruction. */
6708 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
6709}
6710
6711
6712/**
6713 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
6714 * backdoor logging w/o checking what it is running inside.
6715 *
6716 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
6717 * backdoor port and magic numbers loaded in registers.
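 * Concretely (see the checks below): EAX holds the 0x564d5868 ('VMXh') magic, DX holds the
 * backdoor port 0x5658 ('VX'), CS is a flat DPL-3 segment and the opcode byte at RIP is 0xed.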
6718 *
6719 * @returns true if it is, false if it isn't.
6720 * @sa hmR0SvmIsMesaDrvGp.
6721 */
6722DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
6723{
6724 /* 0xed: IN eAX,dx */
6725 uint8_t abInstr[1];
6726 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
6727 return false;
6728
6729 /* Check that it is #GP(0). */
6730 if (pVmxTransient->uExitIntErrorCode != 0)
6731 return false;
6732
6733 /* Check magic and port. */
6734 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
6735 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
6736 if (pCtx->rax != UINT32_C(0x564d5868))
6737 return false;
6738 if (pCtx->dx != UINT32_C(0x5658))
6739 return false;
6740
6741 /* Flat ring-3 CS. */
6742 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
6743 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
6744 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
6745 if (pCtx->cs.Attr.n.u2Dpl != 3)
6746 return false;
6747 if (pCtx->cs.u64Base != 0)
6748 return false;
6749
6750 /* Check opcode. */
6751 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
6752 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
6753 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
6754 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
6755 if (RT_FAILURE(rc))
6756 return false;
6757 if (abInstr[0] != 0xed)
6758 return false;
6759
6760 return true;
6761}
6762
6763
6764/**
6765 * VM-exit exception handler for \#GP (General-protection exception).
6766 *
6767 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6768 */
6769static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6770{
6771 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6772 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
6773
6774 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6775 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6776#ifndef IN_NEM_DARWIN
6777 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6778 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6779 { /* likely */ }
6780 else
6781#endif
6782 {
6783#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6784# ifndef IN_NEM_DARWIN
6785 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6786# else
6787 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
6788# endif
6789#endif
6790 /*
6791 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
6792 * executing a nested-guest, reflect #GP to the guest or nested-guest.
6793 */
6794 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6795 AssertRCReturn(rc, rc);
6796 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
6797 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
6798
6799 if ( pVmxTransient->fIsNestedGuest
6800 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
6801 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
6802 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6803 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6804 else
6805 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
6806 return rc;
6807 }
6808
6809#ifndef IN_NEM_DARWIN
6810 Assert(CPUMIsGuestInRealModeEx(pCtx));
6811 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
6812 Assert(!pVmxTransient->fIsNestedGuest);
6813
6814 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
6815 AssertRCReturn(rc, rc);
6816
6817 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6818 if (rcStrict == VINF_SUCCESS)
6819 {
6820 if (!CPUMIsGuestInRealModeEx(pCtx))
6821 {
6822 /*
6823 * The guest is no longer in real-mode, check if we can continue executing the
6824 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
6825 */
6826 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6827 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
6828 {
6829 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
6830 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6831 }
6832 else
6833 {
6834 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
6835 rcStrict = VINF_EM_RESCHEDULE;
6836 }
6837 }
6838 else
6839 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6840 }
6841 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6842 {
6843 rcStrict = VINF_SUCCESS;
6844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6845 }
6846 return VBOXSTRICTRC_VAL(rcStrict);
6847#endif
6848}
6849
6850
6851/**
6852 * VM-exit exception handler wrapper for all other exceptions that are not handled
6853 * by a specific handler.
6854 *
6855 * This simply re-injects the exception back into the VM without any special
6856 * processing.
6857 *
6858 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
6859 */
6860static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6861{
6862 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6863
6864#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6865# ifndef IN_NEM_DARWIN
6866 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6867 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
6868 ("uVector=%#x u32XcptBitmap=%#X32\n",
6869 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
6870 NOREF(pVmcsInfo);
6871# endif
6872#endif
6873
6874 /*
6875 * Re-inject the exception into the guest. This cannot be a double-fault condition, which
6876 * would have been handled while checking exits due to event delivery.
6877 */
6878 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6879
6880#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
6881 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6882 AssertRCReturn(rc, rc);
6883 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6884#endif
6885
6886#ifdef VBOX_WITH_STATISTICS
6887 switch (uVector)
6888 {
6889 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
6890 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
6891 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
6892 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6893 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
6894 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
6895 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
6896 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
6897 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
6898 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
6899 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
6900 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
6901 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
6902 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
6903 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
6904 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
6905 default:
6906 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
6907 break;
6908 }
6909#endif
6910
6911 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
6912 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
6913 NOREF(uVector);
6914
6915 /* Re-inject the original exception into the guest. */
6916 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
6917 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
6918 return VINF_SUCCESS;
6919}
6920
6921
6922/**
6923 * VM-exit exception handler for all exceptions (except NMIs!).
6924 *
6925 * @remarks This may be called for both guests and nested-guests. Take care to not
6926 * make assumptions and avoid doing anything that is not relevant when
6927 * executing a nested-guest (e.g., Mesa driver hacks).
6928 */
6929static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6930{
6931 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
6932
6933 /*
6934 * If this VM-exit occurred while delivering an event through the guest IDT, take
6935 * action based on the return code and additional hints (e.g. for page-faults)
6936 * that will be updated in the VMX transient structure.
6937 */
6938 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
6939 if (rcStrict == VINF_SUCCESS)
6940 {
6941 /*
6942 * If an exception caused a VM-exit due to delivery of an event, the original
6943 * event may have to be re-injected into the guest. We shall reinject it and
6944 * continue guest execution. However, page-fault is a complicated case and
6945 * needs additional processing done in vmxHCExitXcptPF().
6946 */
6947 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
6948 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
6949 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6950 || uVector == X86_XCPT_PF)
6951 {
6952 switch (uVector)
6953 {
6954 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
6955 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
6956 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
6957 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
6958 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
6959 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
6960 default:
6961 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
6962 }
6963 }
6964 /* else: inject pending event before resuming guest execution. */
6965 }
6966 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
6967 {
6968 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6969 rcStrict = VINF_SUCCESS;
6970 }
6971
6972 return rcStrict;
6973}
6974/** @} */
6975
6976
6977/** @name VM-exit handlers.
6978 * @{
6979 */
6980/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6981/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6982/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6983
6984/**
6985 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6986 */
6987HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6988{
6989 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
6990 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
6991
6992#ifndef IN_NEM_DARWIN
6993 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
6994 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
6995 return VINF_SUCCESS;
6996 return VINF_EM_RAW_INTERRUPT;
6997#else
6998 return VINF_SUCCESS;
6999#endif
7000}
7001
7002
7003/**
7004 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
7005 * VM-exit.
7006 */
7007HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7008{
7009 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7010 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7011
7012 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
7013
7014 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
7015 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
7016 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
7017
7018 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7019 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
7020 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
7021 NOREF(pVmcsInfo);
7022
7023 VBOXSTRICTRC rcStrict;
7024 switch (uExitIntType)
7025 {
7026#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
7027 /*
7028 * Host physical NMIs:
7029 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
7030 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
7031 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
7032 *
7033 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
7034 * See Intel spec. 27.5.5 "Updating Non-Register State".
7035 */
7036 case VMX_EXIT_INT_INFO_TYPE_NMI:
7037 {
7038 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
7039 break;
7040 }
7041#endif
7042
7043 /*
7044 * Privileged software exceptions (#DB from ICEBP),
7045 * Software exceptions (#BP and #OF),
7046 * Hardware exceptions:
7047 * Process the required exceptions and resume guest execution if possible.
7048 */
7049 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
7050 Assert(uVector == X86_XCPT_DB);
7051 RT_FALL_THRU();
7052 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
7053 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
7054 RT_FALL_THRU();
7055 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
7056 {
7057 NOREF(uVector);
7058 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
7059 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7060 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
7061 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
7062
7063 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
7064 break;
7065 }
7066
7067 default:
7068 {
7069 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
7070 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
7071 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
7072 break;
7073 }
7074 }
7075
7076 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
7077 return rcStrict;
7078}
7079
7080
7081/**
7082 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7083 */
7084HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7085{
7086 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7087
7088 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7089 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7090 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
7091
7092 /* Evaluate and deliver pending events and resume guest execution. */
7093 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
7094 return VINF_SUCCESS;
7095}
7096
7097
7098/**
7099 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7100 */
7101HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7102{
7103 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7104
7105 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7106 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
7107 {
7108 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7109 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7110 }
7111
7112 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
7113
7114 /*
7115 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
7116 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
7117 */
7118 uint32_t fIntrState;
7119 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
7120 AssertRC(rc);
7121 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
7122 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7123 {
7124 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7125 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7126
7127 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
7128 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
7129 AssertRC(rc);
7130 }
7131
7132 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
7133 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
7134
7135 /* Evaluate and deliver pending events and resume guest execution. */
7136 return VINF_SUCCESS;
7137}
7138
7139
7140/**
7141 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7142 */
7143HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7144{
7145 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
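 /* Nothing for us to emulate here; cache maintenance is left to the host, so just skip the instruction. */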
7146 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7147}
7148
7149
7150/**
7151 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7152 */
7153HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7154{
7155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7156 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7157}
7158
7159
7160/**
7161 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7162 */
7163HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7164{
7165 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7166
7167 /*
7168 * Get the state we need and update the exit history entry.
7169 */
7170 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7171 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7172
7173 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7174 AssertRCReturn(rc, rc);
7175
7176 VBOXSTRICTRC rcStrict;
7177 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
7178 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
7179 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
7180 if (!pExitRec)
7181 {
7182 /*
7183 * Regular CPUID instruction execution.
7184 */
7185 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
7186 if (rcStrict == VINF_SUCCESS)
7187 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7188 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7189 {
7190 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7191 rcStrict = VINF_SUCCESS;
7192 }
7193 }
7194 else
7195 {
7196 /*
7197 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
7198 */
7199 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7200 AssertRCReturn(rc2, rc2);
7201
7202 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
7203 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
7204
7205 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
7206 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
7207
7208 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
7209 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
7210 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7211 }
7212 return rcStrict;
7213}
7214
7215
7216/**
7217 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7218 */
7219HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7220{
7221 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7222
7223 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7224 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
7225 AssertRCReturn(rc, rc);
7226
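 /* GETSEC only causes a VM-exit while CR4.SMXE is set (with SMXE clear the CPU raises #UD itself), so hand the instruction to the emulator; anything else is unexpected. */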
7227 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
7228 return VINF_EM_RAW_EMULATE_INSTR;
7229
7230 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
7231 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7232}
7233
7234
7235/**
7236 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7237 */
7238HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7239{
7240 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7241
7242 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7243 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7244 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
7245 AssertRCReturn(rc, rc);
7246
7247 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
7248 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7249 {
7250 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7251 we must reset offsetting on VM-entry. See @bugref{6634}. */
7252 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7253 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7254 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7255 }
7256 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7257 {
7258 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7259 rcStrict = VINF_SUCCESS;
7260 }
7261 return rcStrict;
7262}
7263
7264
7265/**
7266 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7267 */
7268HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7269{
7270 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7271
7272 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7273 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7274 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
7275 AssertRCReturn(rc, rc);
7276
7277 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
7278 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7279 {
7280 /* If we get a spurious VM-exit when TSC offsetting is enabled,
7281 we must reset offsetting on VM-reentry. See @bugref{6634}. */
7282 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
7283 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7284 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7285 }
7286 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7287 {
7288 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7289 rcStrict = VINF_SUCCESS;
7290 }
7291 return rcStrict;
7292}
7293
7294
7295/**
7296 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7297 */
7298HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7299{
7300 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7301
7302 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7303 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
7304 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
7305 AssertRCReturn(rc, rc);
7306
7307 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7308 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
7309 if (RT_LIKELY(rc == VINF_SUCCESS))
7310 {
7311 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
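 /* RDPMC is always 2 bytes long (opcode 0F 33), hence the fixed instruction length. */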
7312 Assert(pVmxTransient->cbExitInstr == 2);
7313 }
7314 else
7315 {
7316 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7317 rc = VERR_EM_INTERPRETER;
7318 }
7319 return rc;
7320}
7321
7322
7323/**
7324 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
7325 */
7326HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7327{
7328 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7329
7330 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
7331 if (EMAreHypercallInstructionsEnabled(pVCpu))
7332 {
7333 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7334 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
7335 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7336 AssertRCReturn(rc, rc);
7337
7338 /* Perform the hypercall. */
7339 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7340 if (rcStrict == VINF_SUCCESS)
7341 {
7342 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7343 AssertRCReturn(rc, rc);
7344 }
7345 else
7346 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
7347 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
7348 || RT_FAILURE(rcStrict));
7349
7350 /* If the hypercall changes anything other than guest's general-purpose registers,
7351 we would need to reload the guest changed bits here before VM-entry. */
7352 }
7353 else
7354 Log4Func(("Hypercalls not enabled\n"));
7355
7356 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
7357 if (RT_FAILURE(rcStrict))
7358 {
7359 vmxHCSetPendingXcptUD(pVCpu);
7360 rcStrict = VINF_SUCCESS;
7361 }
7362
7363 return rcStrict;
7364}
7365
7366
7367/**
7368 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7369 */
7370HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7371{
7372 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7373#ifndef IN_NEM_DARWIN
7374 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
7375#endif
7376
7377 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7378 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7379 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7380 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7381 AssertRCReturn(rc, rc);
7382
7383 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
7384
7385 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
7386 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7387 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7388 {
7389 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7390 rcStrict = VINF_SUCCESS;
7391 }
7392 else
7393 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
7394 VBOXSTRICTRC_VAL(rcStrict)));
7395 return rcStrict;
7396}
7397
7398
7399/**
7400 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7401 */
7402HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7403{
7404 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7405
7406 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7407 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7408 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
7409 AssertRCReturn(rc, rc);
7410
7411 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
7412 if (rcStrict == VINF_SUCCESS)
7413 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7414 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7415 {
7416 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7417 rcStrict = VINF_SUCCESS;
7418 }
7419
7420 return rcStrict;
7421}
7422
7423
7424/**
7425 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7426 */
7427HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7428{
7429 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7430
7431 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7432 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7433 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
7434 AssertRCReturn(rc, rc);
7435
7436 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
7437 if (RT_SUCCESS(rcStrict))
7438 {
7439 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
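 /* IEM presumably signalled a halt here; if EM determines the monitored wait need not put the VCPU to sleep, continue guest execution instead. */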
7440 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
7441 rcStrict = VINF_SUCCESS;
7442 }
7443
7444 return rcStrict;
7445}
7446
7447
7448/**
7449 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7450 * VM-exit.
7451 */
7452HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7453{
7454 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
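 /* A triple fault shuts down the virtual CPU, so request a VM reset from EM. */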
7455 return VINF_EM_RESET;
7456}
7457
7458
7459/**
7460 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7461 */
7462HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7463{
7464 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7465
7466 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7467 AssertRCReturn(rc, rc);
7468
7469 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
7470 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
7471 rc = VINF_SUCCESS;
7472 else
7473 rc = VINF_EM_HALT;
7474
7475 if (rc != VINF_SUCCESS)
7476 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
7477 return rc;
7478}
7479
7480
7481/**
7482 * VM-exit handler for instructions that result in a \#UD exception delivered to
7483 * the guest.
7484 */
7485HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7486{
7487 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7488 vmxHCSetPendingXcptUD(pVCpu);
7489 return VINF_SUCCESS;
7490}
7491
7492
7493/**
7494 * VM-exit handler for expiry of the VMX-preemption timer.
7495 */
7496HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7497{
7498 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7499
7500 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
7501 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7502 Log12(("vmxHCExitPreemptTimer:\n"));
7503
7504 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7505 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7506 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7507 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
7508 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7509}
7510
7511
7512/**
7513 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7514 */
7515HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7516{
7517 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7518
7519 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7520 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7521 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
7522 AssertRCReturn(rc, rc);
7523
7524 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
7525 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7526 : HM_CHANGED_RAISED_XCPT_MASK);
7527
7528#ifndef IN_NEM_DARWIN
7529 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
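 /* After XSETBV the guest XCR0 may or may not match the host value any longer; re-check whether XCR0 needs swapping around VM-entry/exit and refresh the start-VM function if that changed. */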
7530 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
7531 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
7532 {
7533 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
7534 hmR0VmxUpdateStartVmFunction(pVCpu);
7535 }
7536#endif
7537
7538 return rcStrict;
7539}
7540
7541
7542/**
7543 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7544 */
7545HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7546{
7547 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7548
7549 /** @todo Enable the new code after finding a reliable guest test-case. */
7550#if 1
7551 return VERR_EM_INTERPRETER;
7552#else
7553 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7554 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
7555 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7556 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
7557 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7558 AssertRCReturn(rc, rc);
7559
7560 /* Paranoia. Ensure this has a memory operand. */
7561 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
7562
7563 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
7564 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
7565 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
7566 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
7567
7568 RTGCPTR GCPtrDesc;
7569 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
7570
7571 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
7572 GCPtrDesc, uType);
7573 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7574 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7575 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7576 {
7577 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7578 rcStrict = VINF_SUCCESS;
7579 }
7580 return rcStrict;
7581#endif
7582}
7583
7584
7585/**
7586 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
7587 * VM-exit.
7588 */
7589HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7590{
7591 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7592 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
7593 AssertRCReturn(rc, rc);
7594
7595 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
7596 if (RT_FAILURE(rc))
7597 return rc;
7598
7599 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
7600 NOREF(uInvalidReason);
7601
7602#ifdef VBOX_STRICT
7603 uint32_t fIntrState;
7604 uint64_t u64Val;
7605 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
7606 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
7607 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7608
7609 Log4(("uInvalidReason %u\n", uInvalidReason));
7610 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
7611 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7612 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7613
7614 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
7615 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
7616 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
7617 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
7618 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
7619 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
7620 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
7621 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
7622 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
7623 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
7624 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
7625 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
7626# ifndef IN_NEM_DARWIN
7627 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
7628 {
7629 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7630 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7631 }
7632
7633 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
7634# endif
7635#endif
7636
7637 return VERR_VMX_INVALID_GUEST_STATE;
7638}
7639
7640/**
7641 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
7642 */
7643HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7644{
7645 /*
7646 * Cumulative notes of all recognized but unexpected VM-exits.
7647 *
7648 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
7649 * nested-paging is used.
7650 *
7651 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
7652 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
7653 * this function (and thereby stopping VM execution) for handling such instructions.
7654 *
7655 *
7656 * VMX_EXIT_INIT_SIGNAL:
7657 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
7658 * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these
7659 * VM-exits. However, we should not receive INIT signal VM-exits while executing a VM.
7660 *
7661 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
7662 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
7663 * See Intel spec. "23.8 Restrictions on VMX operation".
7664 *
7665 * VMX_EXIT_SIPI:
7666 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
7667 * activity state is used. We don't make use of it as our guests don't have direct
7668 * access to the host local APIC.
7669 *
7670 * See Intel spec. 25.3 "Other Causes of VM-exits".
7671 *
7672 * VMX_EXIT_IO_SMI:
7673 * VMX_EXIT_SMI:
7674 * This can only happen if we support dual-monitor treatment of SMI, which can be
7675 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
7676 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
7677 * VMX root mode or receive an SMI. If we get here, something funny is going on.
7678 *
7679 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
7680 * See Intel spec. 25.3 "Other Causes of VM-Exits"
7681 *
7682 * VMX_EXIT_ERR_MSR_LOAD:
7683 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
7684 * and typically indicate a bug in the hypervisor code. We thus cannot resume
7685 * execution.
7686 *
7687 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
7688 *
7689 * VMX_EXIT_ERR_MACHINE_CHECK:
7690 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition,
7691 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
7692 * abort-class #MC exception is raised. We thus cannot assume a
7693 * reasonable chance of continuing any sort of execution and we bail.
7694 *
7695 * See Intel spec. 15.1 "Machine-check Architecture".
7696 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
7697 *
7698 * VMX_EXIT_PML_FULL:
7699 * VMX_EXIT_VIRTUALIZED_EOI:
7700 * VMX_EXIT_APIC_WRITE:
7701 * We do not currently support any of these features and thus they are all unexpected
7702 * VM-exits.
7703 *
7704 * VMX_EXIT_GDTR_IDTR_ACCESS:
7705 * VMX_EXIT_LDTR_TR_ACCESS:
7706 * VMX_EXIT_RDRAND:
7707 * VMX_EXIT_RSM:
7708 * VMX_EXIT_VMFUNC:
7709 * VMX_EXIT_ENCLS:
7710 * VMX_EXIT_RDSEED:
7711 * VMX_EXIT_XSAVES:
7712 * VMX_EXIT_XRSTORS:
7713 * VMX_EXIT_UMWAIT:
7714 * VMX_EXIT_TPAUSE:
7715 * VMX_EXIT_LOADIWKEY:
7716 * These VM-exits are -not- caused unconditionally by execution of the corresponding
7717 * instruction. Any VM-exit for these instructions indicates a hardware problem,
7718 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
7719 *
7720 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
7721 */
7722 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7723 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
7724 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
7725}
7726
7727
7728/**
7729 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7730 */
7731HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7732{
7733 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7734
7735 /** @todo Optimize this: We currently drag in the whole MSR state
7736 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7737 * MSRs required. That would require changes to IEM and possibly CPUM too.
7738 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7739 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7740 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7741 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
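 /* The FS and GS base MSRs are not part of the all-MSRs mask above; for these we import the whole segment register (base, limit and attributes), same as in the WRMSR handler. */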
7742 switch (idMsr)
7743 {
7744 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7745 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7746 }
7747
7748 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7749 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7750 AssertRCReturn(rc, rc);
7751
7752 Log4Func(("ecx=%#RX32\n", idMsr));
7753
7754#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7755 Assert(!pVmxTransient->fIsNestedGuest);
7756 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7757 {
7758 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
7759 && idMsr != MSR_K6_EFER)
7760 {
7761 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
7762 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7763 }
7764 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7765 {
7766 Assert(pVmcsInfo->pvMsrBitmap);
7767 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7768 if (fMsrpm & VMXMSRPM_ALLOW_RD)
7769 {
7770 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
7771 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7772 }
7773 }
7774 }
7775#endif
7776
7777 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
7778 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
7779 if (rcStrict == VINF_SUCCESS)
7780 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7781 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7782 {
7783 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7784 rcStrict = VINF_SUCCESS;
7785 }
7786 else
7787 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
7788 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7789
7790 return rcStrict;
7791}
7792
7793
7794/**
7795 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7796 */
7797HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7798{
7799 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7800
7801 /** @todo Optimize this: We currently drag in the whole MSR state
7802 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
7803 * MSRs required. That would require changes to IEM and possibly CPUM too.
7804 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
7805 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7806 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
7807
7808 /*
7809 * The FS and GS base MSRs are not part of the above all-MSRs mask.
7810 * Although we don't need to fetch the base as it will be overwritten shortly, when
7811 * loading guest-state we also load the entire segment register including its limit
7812 * and attributes, and thus we need to import them here.
7813 */
7814 switch (idMsr)
7815 {
7816 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
7817 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
7818 }
7819
7820 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7821 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7822 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
7823 AssertRCReturn(rc, rc);
7824
7825 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
7826
7827 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
7828 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
7829
7830 if (rcStrict == VINF_SUCCESS)
7831 {
7832 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
7833
7834 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7835 if ( idMsr == MSR_IA32_APICBASE
7836 || ( idMsr >= MSR_IA32_X2APIC_START
7837 && idMsr <= MSR_IA32_X2APIC_END))
7838 {
7839 /*
7840 * We've already saved the APIC related guest-state (TPR) in post-run phase.
7841 * When full APIC register virtualization is implemented we'll have to make
7842 * sure APIC state is saved from the VMCS before IEM changes it.
7843 */
7844 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
7845 }
7846 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7847 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
7848 else if (idMsr == MSR_K6_EFER)
7849 {
7850 /*
7851 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
7852 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
7853 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
7854 */
7855 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
7856 }
7857
7858 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
7859 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
7860 {
7861 switch (idMsr)
7862 {
7863 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
7864 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
7865 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
7866 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
7867 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
7868 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
7869 default:
7870 {
7871#ifndef IN_NEM_DARWIN
7872 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7873 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
7874 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7875 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
7876#else
7877 AssertMsgFailed(("TODO\n"));
7878#endif
7879 break;
7880 }
7881 }
7882 }
7883#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
7884 else
7885 {
7886 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7887 switch (idMsr)
7888 {
7889 case MSR_IA32_SYSENTER_CS:
7890 case MSR_IA32_SYSENTER_EIP:
7891 case MSR_IA32_SYSENTER_ESP:
7892 case MSR_K8_FS_BASE:
7893 case MSR_K8_GS_BASE:
7894 {
7895 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
7896 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7897 }
7898
7899 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
7900 default:
7901 {
7902 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
7903 {
7904 /* EFER MSR writes are always intercepted. */
7905 if (idMsr != MSR_K6_EFER)
7906 {
7907 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7908 idMsr));
7909 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7910 }
7911 }
7912
7913 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
7914 {
7915 Assert(pVmcsInfo->pvMsrBitmap);
7916 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
7917 if (fMsrpm & VMXMSRPM_ALLOW_WR)
7918 {
7919 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
7920 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
7921 }
7922 }
7923 break;
7924 }
7925 }
7926 }
7927#endif /* VBOX_STRICT */
7928 }
7929 else if (rcStrict == VINF_IEM_RAISED_XCPT)
7930 {
7931 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
7932 rcStrict = VINF_SUCCESS;
7933 }
7934 else
7935 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
7936 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
7937
7938 return rcStrict;
7939}
7940
7941
7942/**
7943 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7944 */
7945HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7946{
7947 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7948
7949 /** @todo The guest has likely hit a contended spinlock. We might want to
7950 * poke or schedule a different guest VCPU. */
7951 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
7952 if (RT_SUCCESS(rc))
7953 return VINF_EM_RAW_INTERRUPT;
7954
7955 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
7956 return rc;
7957}
7958
7959
7960/**
7961 * VM-exit handler for when the TPR value is lowered below the specified
7962 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7963 */
7964HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7965{
7966 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7967 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
7968
7969 /*
7970 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
7971 * We'll re-evaluate pending interrupts and inject them before the next VM
7972 * entry so we can just continue execution here.
7973 */
7974 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
7975 return VINF_SUCCESS;
7976}
7977
7978
7979/**
7980 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7981 * VM-exit.
7982 *
7983 * @retval VINF_SUCCESS when guest execution can continue.
7984 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7985 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
7986 * incompatible guest state for VMX execution (real-on-v86 case).
7987 */
7988HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
7989{
7990 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
7991 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
7992
7993 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
7994 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
7995 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7996
7997 VBOXSTRICTRC rcStrict;
7998 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7999 uint64_t const uExitQual = pVmxTransient->uExitQual;
8000 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
8001 switch (uAccessType)
8002 {
8003 /*
8004 * MOV to CRx.
8005 */
8006 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
8007 {
8008 /*
8009 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
8010 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
8011 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
8012 * PAE PDPTEs as well.
8013 */
8014 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8015 AssertRCReturn(rc, rc);
8016
8017 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8018#ifndef IN_NEM_DARWIN
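 /* Remember the current CR0 so the real-mode switch-back check further down can tell whether CR0.PE was cleared. */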
8019 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
8020#endif
8021 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8022 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8023
8024 /*
8025 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
8026 * - When nested paging isn't used.
8027 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
8028 * - We are executing in the VM debug loop.
8029 */
8030#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8031# ifndef IN_NEM_DARWIN
8032 Assert( iCrReg != 3
8033 || !VM_IS_VMX_NESTED_PAGING(pVM)
8034 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8035 || pVCpu->hmr0.s.fUsingDebugLoop);
8036# else
8037 Assert( iCrReg != 3
8038 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8039# endif
8040#endif
8041
8042 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
8043 Assert( iCrReg != 8
8044 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8045
8046 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8047 AssertMsg( rcStrict == VINF_SUCCESS
8048 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8049
8050#ifndef IN_NEM_DARWIN
8051 /*
8052 * This is a kludge for handling switches back to real mode when we try to use
8053 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
8054 * deal with special selector values, so we have to return to ring-3 and run
8055 * there till the selector values are V86 mode compatible.
8056 *
8057 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
8058 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
8059 * this function.
8060 */
8061 if ( iCrReg == 0
8062 && rcStrict == VINF_SUCCESS
8063 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
8064 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
8065 && (uOldCr0 & X86_CR0_PE)
8066 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
8067 {
8068 /** @todo Check selectors rather than returning all the time. */
8069 Assert(!pVmxTransient->fIsNestedGuest);
8070 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
8071 rcStrict = VINF_EM_RESCHEDULE_REM;
8072 }
8073#endif
8074
8075 break;
8076 }
8077
8078 /*
8079 * MOV from CRx.
8080 */
8081 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
8082 {
8083 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
8084 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
8085
8086 /*
8087 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
8088 * - When nested paging isn't used.
8089 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
8090 * - We are executing in the VM debug loop.
8091 */
8092#ifndef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
8093# ifndef IN_NEM_DARWIN
8094 Assert( iCrReg != 3
8095 || !VM_IS_VMX_NESTED_PAGING(pVM)
8096 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
8097 || pVCpu->hmr0.s.fLeaveDone);
8098# else
8099 Assert( iCrReg != 3
8100 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
8101# endif
8102#endif
8103
8104 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
8105 Assert( iCrReg != 8
8106 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
8107
8108 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
8109 break;
8110 }
8111
8112 /*
8113 * CLTS (Clear Task-Switch Flag in CR0).
8114 */
8115 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
8116 {
8117 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
8118 break;
8119 }
8120
8121 /*
8122 * LMSW (Load Machine-Status Word into CR0).
8123 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
8124 */
8125 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
8126 {
8127 RTGCPTR GCPtrEffDst;
8128 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
8129 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
8130 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
8131 if (fMemOperand)
8132 {
8133 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
8134 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
8135 }
8136 else
8137 GCPtrEffDst = NIL_RTGCPTR;
8138 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
8139 break;
8140 }
8141
8142 default:
8143 {
8144 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
8145 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
8146 }
8147 }
8148
8149 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
8150 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
8151 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8152
8153 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
8154 NOREF(pVM);
8155 return rcStrict;
8156}
8157
8158
8159/**
8160 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8161 * VM-exit.
8162 */
8163HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8164{
8165 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8166 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8167
8168 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8169 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8170 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8171 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8172 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
8173 | CPUMCTX_EXTRN_EFER);
8174 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8175 AssertRCReturn(rc, rc);
8176
8177 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
8178 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
8179 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
8180 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
8181 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
8182 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
8183 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
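 /* The exit qualification encodes the access width as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes); the value 2 is not defined. */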
8184 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
8185
8186 /*
8187 * Update exit history to see if this exit can be optimized.
8188 */
8189 VBOXSTRICTRC rcStrict;
8190 PCEMEXITREC pExitRec = NULL;
8191 if ( !fGstStepping
8192 && !fDbgStepping)
8193 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8194 !fIOString
8195 ? !fIOWrite
8196 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
8197 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
8198 : !fIOWrite
8199 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
8200 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
8201 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8202 if (!pExitRec)
8203 {
8204 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
8205 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
8206
8207 uint32_t const cbValue = s_aIOSizes[uIOSize];
8208 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
8209 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
8210 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8211 if (fIOString)
8212 {
8213 /*
8214 * INS/OUTS - I/O String instruction.
8215 *
8216 * Use instruction-information if available, otherwise fall back on
8217 * interpreting the instruction.
8218 */
8219 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8220 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
8221 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
8222 if (fInsOutsInfo)
8223 {
8224 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8225 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
8226 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
8227 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
8228 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
8229 if (fIOWrite)
8230 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
8231 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
8232 else
8233 {
8234 /*
8235 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
8236 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
8237 * See Intel Instruction spec. for "INS".
8238 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
8239 */
8240 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
8241 }
8242 }
8243 else
8244 rcStrict = IEMExecOne(pVCpu);
8245
8246 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8247 fUpdateRipAlready = true;
8248 }
8249 else
8250 {
8251 /*
8252 * IN/OUT - I/O instruction.
8253 */
8254 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
8255 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
8256 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
8257 if (fIOWrite)
8258 {
8259 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
8260 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
8261#ifndef IN_NEM_DARWIN
8262 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8263 && !pCtx->eflags.Bits.u1TF)
8264 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
8265#endif
8266 }
8267 else
8268 {
8269 uint32_t u32Result = 0;
8270 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
8271 if (IOM_SUCCESS(rcStrict))
8272 {
8273 /* Save result of I/O IN instr. in AL/AX/EAX. */
8274 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8275 }
8276#ifndef IN_NEM_DARWIN
8277 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8278 && !pCtx->eflags.Bits.u1TF)
8279 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
8280#endif
8281 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
8282 }
8283 }
8284
8285 if (IOM_SUCCESS(rcStrict))
8286 {
8287 if (!fUpdateRipAlready)
8288 {
8289 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
8290 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
8291 }
8292
8293 /*
8294 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
8295 * meditation while booting a Fedora 17 64-bit guest.
8296 *
8297 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
8298 */
8299 if (fIOString)
8300 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
8301
8302 /*
8303 * If any I/O breakpoints are armed, we need to check if one triggered
8304 * and take appropriate action.
8305 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
8306 */
8307 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
8308 AssertRCReturn(rc, rc);
8309
8310 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
8311 * execution engines about whether hyper BPs and such are pending. */
8312 uint32_t const uDr7 = pCtx->dr[7];
8313 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
8314 && X86_DR7_ANY_RW_IO(uDr7)
8315 && (pCtx->cr4 & X86_CR4_DE))
8316 || DBGFBpIsHwIoArmed(pVM)))
8317 {
8318 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
8319
8320#ifndef IN_NEM_DARWIN
8321 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
8322 VMMRZCallRing3Disable(pVCpu);
8323 HM_DISABLE_PREEMPT(pVCpu);
8324
8325 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
8326
8327 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
8328 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
8329 {
8330 /* Raise #DB. */
8331 if (fIsGuestDbgActive)
8332 ASMSetDR6(pCtx->dr[6]);
8333 if (pCtx->dr[7] != uDr7)
8334 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
8335
8336 vmxHCSetPendingXcptDB(pVCpu);
8337 }
8338 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
8339 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
8340 else if ( rcStrict2 != VINF_SUCCESS
8341 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
8342 rcStrict = rcStrict2;
8343 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
8344
8345 HM_RESTORE_PREEMPT();
8346 VMMRZCallRing3Enable(pVCpu);
8347#else
8348 /** @todo */
8349#endif
8350 }
8351 }
8352
8353#ifdef VBOX_STRICT
8354 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
8355 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
8356 Assert(!fIOWrite);
8357 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
8358 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
8359 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
8360 Assert(fIOWrite);
8361 else
8362 {
8363# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
8364 * statuses, that the VMM device and some others may return. See
8365 * IOM_SUCCESS() for guidance. */
8366 AssertMsg( RT_FAILURE(rcStrict)
8367 || rcStrict == VINF_SUCCESS
8368 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
8369 || rcStrict == VINF_EM_DBG_BREAKPOINT
8370 || rcStrict == VINF_EM_RAW_GUEST_TRAP
8371 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8372# endif
8373 }
8374#endif
8375 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
8376 }
8377 else
8378 {
8379 /*
8380 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
8381 */
8382 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8383 AssertRCReturn(rc2, rc2);
8384 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
8385 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
8386 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
8387 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8388 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
8389 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
8390
8391 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8392 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8393
8394 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8395 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8396 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8397 }
8398 return rcStrict;
8399}
8400
8401
8402/**
8403 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8404 * VM-exit.
8405 */
8406HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8407{
8408 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8409
8410 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8411 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8412 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
8413 {
8414 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8415 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
8416 {
8417 uint32_t uErrCode;
8418 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
8419 {
8420 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8421 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
8422 }
8423 else
8424 uErrCode = 0;
8425
8426 RTGCUINTPTR GCPtrFaultAddress;
8427 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
8428 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
8429 else
8430 GCPtrFaultAddress = 0;
8431
8432 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8433
8434 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
8435 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
8436
8437 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
8438 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
8439 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8440 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8441 }
8442 }
8443
8444 /* Fall back to the interpreter to emulate the task-switch. */
8445 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
8446 return VERR_EM_INTERPRETER;
8447}
8448
8449
8450/**
8451 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8452 */
8453HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8454{
8455 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8456
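 /* The monitor-trap flag is presumably armed by our own single-stepping code; disarm it and report the completed step to the debugger. */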
8457 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8458 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
8459 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8460 AssertRC(rc);
8461 return VINF_EM_DBG_STEPPED;
8462}
8463
8464
8465/**
8466 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8467 */
8468HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8469{
8470 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8471 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
8472
8473 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8474 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8475 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8476 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8477 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8478
8479 /*
8480 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8481 */
8482 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8483 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8484 {
8485 /* For some crazy guests, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
8486 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
8487 {
8488 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8489 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8490 }
8491 }
8492 else
8493 {
8494 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8495 return rcStrict;
8496 }
8497
8498 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
8499 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8500 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8501 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8502 AssertRCReturn(rc, rc);
8503
8504 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
8505 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
8506 switch (uAccessType)
8507 {
8508#ifndef IN_NEM_DARWIN
8509 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8510 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8511 {
8512 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8513 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
8514 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8515
8516 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
8517 GCPhys &= PAGE_BASE_GC_MASK;
8518 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
8519 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
8520 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
8521
8522 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
8523 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
8524 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8525 if ( rcStrict == VINF_SUCCESS
8526 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8527 || rcStrict == VERR_PAGE_NOT_PRESENT)
8528 {
8529 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8530 | HM_CHANGED_GUEST_APIC_TPR);
8531 rcStrict = VINF_SUCCESS;
8532 }
8533 break;
8534 }
8535#else
8536 /** @todo */
8537#endif
8538
8539 default:
8540 {
8541 Log4Func(("uAccessType=%#x\n", uAccessType));
8542 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
8543 break;
8544 }
8545 }
8546
8547 if (rcStrict != VINF_SUCCESS)
8548 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
8549 return rcStrict;
8550}
8551
8552
8553/**
8554 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8555 * VM-exit.
8556 */
8557HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8558{
8559 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8560 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8561
8562 /*
8563 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
8564 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
8565 * must emulate the MOV DRx access.
8566 */
8567 if (!pVmxTransient->fIsNestedGuest)
8568 {
8569 /* We should -not- get this VM-exit if the guest's debug registers were active. */
8570 if (pVmxTransient->fWasGuestDebugStateActive)
8571 {
8572 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
8573 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
8574 }
8575
8576 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
8577 && !pVmxTransient->fWasHyperDebugStateActive)
8578 {
8579 Assert(!DBGFIsStepping(pVCpu));
8580 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
8581
8582 /* Don't intercept MOV DRx any more. */
8583 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
8584 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8585 AssertRC(rc);
8586
8587#ifndef IN_NEM_DARWIN
8588             /* We're playing with the host CPU state here; make sure we can't preempt or longjmp. */
8589 VMMRZCallRing3Disable(pVCpu);
8590 HM_DISABLE_PREEMPT(pVCpu);
8591
8592 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8593 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
8594 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8595
8596 HM_RESTORE_PREEMPT();
8597 VMMRZCallRing3Enable(pVCpu);
8598#else
8599 CPUMR3NemActivateGuestDebugState(pVCpu);
8600 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8601 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8602#endif
8603
8604#ifdef VBOX_WITH_STATISTICS
8605 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8606 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8607 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8608 else
8609 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8610#endif
8611 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
8612 return VINF_SUCCESS;
8613 }
8614 }
8615
8616 /*
8617     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
8618     * The EFER MSR is always up-to-date.
8619     * Import the segment registers and DR7 from the CPU.
8620 */
8621 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8622 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8623 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
8624 AssertRCReturn(rc, rc);
8625 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
8626
8627 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8628 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
8629 {
8630 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8631 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
8632 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
8633 if (RT_SUCCESS(rc))
8634 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
8635 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
8636 }
8637 else
8638 {
8639 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
8640 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
8641 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
8642 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
8643 }
8644
8645 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8646 if (RT_SUCCESS(rc))
8647 {
8648 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
8649 AssertRCReturn(rc2, rc2);
8650 return VINF_SUCCESS;
8651 }
8652 return rc;
8653}
8654
8655
8656/**
8657 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8658 * Conditional VM-exit.
8659 */
8660HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8661{
8662 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8663
8664#ifndef IN_NEM_DARWIN
8665 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8666
8667 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8668 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8669 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8670 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8671 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8672
8673 /*
8674 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8675 */
8676 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8677 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8678 {
8679 /*
8680 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
8681 * instruction emulation to inject the original event. Otherwise, injecting the original event
8682 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
8683 */
8684 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8685 { /* likely */ }
8686 else
8687 {
8688 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
8689#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8690 /** @todo NSTVMX: Think about how this should be handled. */
8691 if (pVmxTransient->fIsNestedGuest)
8692 return VERR_VMX_IPE_3;
8693#endif
8694 return VINF_EM_RAW_INJECT_TRPM_EVENT;
8695 }
8696 }
8697 else
8698 {
8699 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8700 return rcStrict;
8701 }
8702
8703 /*
8704 * Get sufficient state and update the exit history entry.
8705 */
8706 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8707 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8708 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8709 AssertRCReturn(rc, rc);
8710
8711 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8712 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
8713 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
8714 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
8715 if (!pExitRec)
8716 {
8717 /*
8718          * If we succeed, resume guest execution.
8719          * If interpreting the instruction fails because we cannot resolve the guest physical address of
8720          * the page containing the instruction via the guest's page tables (we invalidate the guest page
8721          * in the host TLB in that case), resume execution anyway; the resulting guest page fault lets the
8722          * guest handle this weird case itself. See @bugref{6043}.
8723 */
8724 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8725 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8726/** @todo bird: We can probably just go straight to IOM here and assume that
8727 * it's MMIO, then fall back on PGM if that hunch didn't work out so
8728  * well. However, we need to address the aliasing workarounds that
8729 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
8730 *
8731 * Might also be interesting to see if we can get this done more or
8732 * less locklessly inside IOM. Need to consider the lookup table
8733 * updating and use a bit more carefully first (or do all updates via
8734 * rendezvous) */
8735 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
8736 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
8737 if ( rcStrict == VINF_SUCCESS
8738 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8739 || rcStrict == VERR_PAGE_NOT_PRESENT)
8740 {
8741 /* Successfully handled MMIO operation. */
8742 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8743 | HM_CHANGED_GUEST_APIC_TPR);
8744 rcStrict = VINF_SUCCESS;
8745 }
8746 }
8747 else
8748 {
8749 /*
8750 * Frequent exit or something needing probing. Call EMHistoryExec.
8751 */
8752         Log4(("EptMisconfigExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
8753 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
8754
8755 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8756 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8757
8758         Log4(("EptMisconfigExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8759 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8760 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8761 }
8762 return rcStrict;
8763#else
8764 AssertFailed();
8765 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
8766#endif
8767}
8768
8769
8770/**
8771 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8772 * VM-exit.
8773 */
8774HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8775{
8776 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8777#ifndef IN_NEM_DARWIN
8778 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
8779
8780 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8781 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
8782 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8783 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8784 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
8785 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
8786
8787 /*
8788 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
8789 */
8790 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
8791 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8792 {
8793 /*
8794 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
8795 * we shall resolve the nested #PF and re-inject the original event.
8796 */
8797 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
8798 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
8799 }
8800 else
8801 {
8802 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
8803 return rcStrict;
8804 }
8805
8806 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8807 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8808 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
8809 AssertRCReturn(rc, rc);
8810
8811 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
8812 uint64_t const uExitQual = pVmxTransient->uExitQual;
8813 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
8814
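    /*
     * Translate the EPT-violation exit qualification into a #PF-style error code for PGM:
     * an instruction fetch maps to ID, a write access maps to RW, and any of the EPT entry's
     * read/write/execute permission bits being set means the translation was present (P).
     */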
8815 RTGCUINT uErrorCode = 0;
8816 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
8817 uErrorCode |= X86_TRAP_PF_ID;
8818 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8819 uErrorCode |= X86_TRAP_PF_RW;
8820 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
8821 uErrorCode |= X86_TRAP_PF_P;
8822
8823 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8824 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
8825
8826 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8827
8828 /*
8829 * Handle the pagefault trap for the nested shadow table.
8830 */
8831 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8832 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
8833 TRPMResetTrap(pVCpu);
8834
8835 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8836 if ( rcStrict == VINF_SUCCESS
8837 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
8838 || rcStrict == VERR_PAGE_NOT_PRESENT)
8839 {
8840 /* Successfully synced our nested page tables. */
8841 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
8842 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
8843 return VINF_SUCCESS;
8844 }
8845#else
8846 PVM pVM = pVCpu->CTX_SUFF(pVM);
8847 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
8848 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8849 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
8850 vmxHCImportGuestRip(pVCpu);
8851 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
8852
8853 /*
8854 * Ask PGM for information about the given GCPhys. We need to check if we're
8855 * out of sync first.
8856 */
8857 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
8858 PGMPHYSNEMPAGEINFO Info;
8859 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
8860 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
8861 if (RT_SUCCESS(rc))
8862 {
8863 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8864 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
8865 {
8866 if (State.fCanResume)
8867 {
8868 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
8869 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8870 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8871 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8872 State.fDidSomething ? "" : " no-change"));
8873 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
8874 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8875 return VINF_SUCCESS;
8876 }
8877 }
8878
8879 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
8880 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8881 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
8882 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
8883 State.fDidSomething ? "" : " no-change"));
8884 }
8885 else
8886 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
8887 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8888 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
8889
8890 /*
8891 * Emulate the memory access, either access handler or special memory.
8892 */
8893 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
8894 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
8895 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
8896 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
8897 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
8898
8899 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8900 AssertRCReturn(rc, rc);
8901
8902 VBOXSTRICTRC rcStrict;
8903 if (!pExitRec)
8904 rcStrict = IEMExecOne(pVCpu);
8905 else
8906 {
8907 /* Frequent access or probing. */
8908 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
8909 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
8910 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
8911 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
8912 }
8913
8914 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8915#endif
8916
8917     Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8918 return rcStrict;
8919}
8920
8921
8922#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8923/**
8924 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
8925 */
8926HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8927{
8928 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8929
8930 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8931 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8932 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8933 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
8934 | CPUMCTX_EXTRN_HWVIRT
8935 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
8936 AssertRCReturn(rc, rc);
8937
8938 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8939
8940 VMXVEXITINFO ExitInfo;
8941 RT_ZERO(ExitInfo);
8942 ExitInfo.uReason = pVmxTransient->uExitReason;
8943 ExitInfo.u64Qual = pVmxTransient->uExitQual;
8944 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
8945 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
8946 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
8947
8948 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
8949 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8950 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
8951 else if (rcStrict == VINF_IEM_RAISED_XCPT)
8952 {
8953 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
8954 rcStrict = VINF_SUCCESS;
8955 }
8956 return rcStrict;
8957}
8958
8959
8960/**
8961 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
8962 */
8963HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8964{
8965 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8966
8967 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
8968 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
8969 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8970 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8971 AssertRCReturn(rc, rc);
8972
8973 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
8974
8975 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8976 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
8977 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
8978 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8979 {
8980 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
8981 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8982 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
8983 }
8984 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
8985 return rcStrict;
8986}
8987
8988
8989/**
8990 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
8991 */
8992HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
8993{
8994 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
8995
8996 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8997 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8998 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8999 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9000 | CPUMCTX_EXTRN_HWVIRT
9001 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9002 AssertRCReturn(rc, rc);
9003
9004 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9005
9006 VMXVEXITINFO ExitInfo;
9007 RT_ZERO(ExitInfo);
9008 ExitInfo.uReason = pVmxTransient->uExitReason;
9009 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9010 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9011 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9012 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9013
9014 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
9015 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9016 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9017 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9018 {
9019 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9020 rcStrict = VINF_SUCCESS;
9021 }
9022 return rcStrict;
9023}
9024
9025
9026/**
9027 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
9028 */
9029HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9030{
9031 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9032
9033 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9034 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9035 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9036 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9037 | CPUMCTX_EXTRN_HWVIRT
9038 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9039 AssertRCReturn(rc, rc);
9040
9041 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9042
9043 VMXVEXITINFO ExitInfo;
9044 RT_ZERO(ExitInfo);
9045 ExitInfo.uReason = pVmxTransient->uExitReason;
9046 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9047 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9048 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9049 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9050
9051 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
9052 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9053 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9054 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9055 {
9056 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9057 rcStrict = VINF_SUCCESS;
9058 }
9059 return rcStrict;
9060}
9061
9062
9063/**
9064 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
9065 */
9066HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9067{
9068 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9069
9070 /*
9071      * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and thus
9072      * might not need to import the shadow VMCS state. However, it is safer to do so in case
9073      * code elsewhere looks at unsynced VMCS fields.
9074 */
9075 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9076 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9077 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9078 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9079 | CPUMCTX_EXTRN_HWVIRT
9080 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9081 AssertRCReturn(rc, rc);
9082
9083 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9084
9085 VMXVEXITINFO ExitInfo;
9086 RT_ZERO(ExitInfo);
9087 ExitInfo.uReason = pVmxTransient->uExitReason;
9088 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9089 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9090 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
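    /* VMREAD stores its result to the destination operand; when that operand is in memory,
       the effective address is therefore decoded as a write access. */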
9091 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9092 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
9093
9094 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
9095 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9096 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9097 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9098 {
9099 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9100 rcStrict = VINF_SUCCESS;
9101 }
9102 return rcStrict;
9103}
9104
9105
9106/**
9107 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
9108 */
9109HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9110{
9111 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9112
9113 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
9114 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
9115 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9116 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9117 AssertRCReturn(rc, rc);
9118
9119 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9120
9121 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9122 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
9123 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
9124 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9125 {
9126 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
9127 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
9128 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
9129 }
9130 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
9131 return rcStrict;
9132}
9133
9134
9135/**
9136 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
9137 */
9138HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9139{
9140 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9141
9142 /*
9143      * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
9144      * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
9145      * re-loading of the entire shadow VMCS, so we save the entire shadow VMCS state here.
9146 */
9147 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9148 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9149 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9150 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9151 | CPUMCTX_EXTRN_HWVIRT
9152 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9153 AssertRCReturn(rc, rc);
9154
9155 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9156
9157 VMXVEXITINFO ExitInfo;
9158 RT_ZERO(ExitInfo);
9159 ExitInfo.uReason = pVmxTransient->uExitReason;
9160 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9161 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9162 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
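    /* VMWRITE takes the value to write from its source operand; when that operand is in memory,
       the effective address is therefore decoded as a read access. */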
9163 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
9164 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9165
9166 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
9167 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9168 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9169 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9170 {
9171 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9172 rcStrict = VINF_SUCCESS;
9173 }
9174 return rcStrict;
9175}
9176
9177
9178/**
9179 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
9180 */
9181HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9182{
9183 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9184
9185 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9186 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
9187 | CPUMCTX_EXTRN_HWVIRT
9188 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
9189 AssertRCReturn(rc, rc);
9190
9191 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9192
9193 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
9194 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9195 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
9196 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9197 {
9198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9199 rcStrict = VINF_SUCCESS;
9200 }
9201 return rcStrict;
9202}
9203
9204
9205/**
9206 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
9207 */
9208HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9209{
9210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9211
9212 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9213 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9214 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9215 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9216 | CPUMCTX_EXTRN_HWVIRT
9217 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9218 AssertRCReturn(rc, rc);
9219
9220 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9221
9222 VMXVEXITINFO ExitInfo;
9223 RT_ZERO(ExitInfo);
9224 ExitInfo.uReason = pVmxTransient->uExitReason;
9225 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9226 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9227 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9228 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9229
9230 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
9231 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9232 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
9233 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9234 {
9235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9236 rcStrict = VINF_SUCCESS;
9237 }
9238 return rcStrict;
9239}
9240
9241
9242/**
9243 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
9244 */
9245HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9246{
9247 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9248
9249 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9250 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9251 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9252 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9253 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9254 AssertRCReturn(rc, rc);
9255
9256 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9257
9258 VMXVEXITINFO ExitInfo;
9259 RT_ZERO(ExitInfo);
9260 ExitInfo.uReason = pVmxTransient->uExitReason;
9261 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9262 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9263 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9264 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9265
9266 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
9267 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9268 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9269 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9270 {
9271 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9272 rcStrict = VINF_SUCCESS;
9273 }
9274 return rcStrict;
9275}
9276
9277
9278# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
9279/**
9280 * VM-exit handler for INVEPT (VMX_EXIT_INVEPT). Unconditional VM-exit.
9281 */
9282HMVMX_EXIT_DECL vmxHCExitInvept(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9283{
9284 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9285
9286 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9287 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9288 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9289 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
9290 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
9291 AssertRCReturn(rc, rc);
9292
9293 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
9294
9295 VMXVEXITINFO ExitInfo;
9296 RT_ZERO(ExitInfo);
9297 ExitInfo.uReason = pVmxTransient->uExitReason;
9298 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9299 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
9300 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9301 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
9302
9303 VBOXSTRICTRC rcStrict = IEMExecDecodedInvept(pVCpu, &ExitInfo);
9304 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9305 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
9306 else if (rcStrict == VINF_IEM_RAISED_XCPT)
9307 {
9308 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9309 rcStrict = VINF_SUCCESS;
9310 }
9311 return rcStrict;
9312}
9313# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
9314#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9315/** @} */
9316
9317
9318#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9319/** @name Nested-guest VM-exit handlers.
9320 * @{
9321 */
9322/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9323/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9324/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9325
9326/**
9327 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9328 * Conditional VM-exit.
9329 */
9330HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9331{
9332 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9333
9334 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9335
9336 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9337 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9338 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
9339
9340 switch (uExitIntType)
9341 {
9342#ifndef IN_NEM_DARWIN
9343 /*
9344 * Physical NMIs:
9345          *     We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
9346 */
9347 case VMX_EXIT_INT_INFO_TYPE_NMI:
9348 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9349#endif
9350
9351 /*
9352 * Hardware exceptions,
9353 * Software exceptions,
9354 * Privileged software exceptions:
9355 * Figure out if the exception must be delivered to the guest or the nested-guest.
9356 */
9357 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
9358 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
9359 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
9360 {
9361 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
9362 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9363 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9364 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9365
9366 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
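            /* Consult the nested-guest's exception bitmap (and, for #PF, the page-fault error-code
               mask/match controls) to decide whether the exception must be reflected to the
               nested-hypervisor as a VM-exit. */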
9367 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
9368 pVmxTransient->uExitIntErrorCode);
9369 if (fIntercept)
9370 {
9371 /* Exit qualification is required for debug and page-fault exceptions. */
9372 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9373
9374 /*
9375 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
9376 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
9377 * length. However, if delivery of a software interrupt, software exception or privileged
9378 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
9379 */
9380 VMXVEXITINFO ExitInfo;
9381 RT_ZERO(ExitInfo);
9382 ExitInfo.uReason = pVmxTransient->uExitReason;
9383 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9384 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9385
9386 VMXVEXITEVENTINFO ExitEventInfo;
9387 RT_ZERO(ExitEventInfo);
9388 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
9389 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
9390 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9391 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9392
9393#ifdef DEBUG_ramshankar
9394 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9395 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
9396 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
9397 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
9398 {
9399 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
9400 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
9401 }
9402#endif
9403 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
9404 }
9405
9406 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
9407 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
9408 return vmxHCExitXcpt(pVCpu, pVmxTransient);
9409 }
9410
9411 /*
9412 * Software interrupts:
9413 * VM-exits cannot be caused by software interrupts.
9414 *
9415 * External interrupts:
9416 * This should only happen when "acknowledge external interrupts on VM-exit"
9417 * control is set. However, we never set this when executing a guest or
9418 * nested-guest. For nested-guests it is emulated while injecting interrupts into
9419 * the guest.
9420 */
9421 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
9422 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
9423 default:
9424 {
9425 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
9426 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9427 }
9428 }
9429}
9430
9431
9432/**
9433 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
9434 * Unconditional VM-exit.
9435 */
9436HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9437{
9438 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9439 return IEMExecVmxVmexitTripleFault(pVCpu);
9440}
9441
9442
9443/**
9444 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9445 */
9446HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9447{
9448 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9449
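    /* Reflect the VM-exit to the nested-hypervisor only if it enabled interrupt-window exiting itself;
       otherwise the intercept was set by us to deliver pending events to the nested-guest. */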
9450 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
9451 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9452 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9453}
9454
9455
9456/**
9457 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9458 */
9459HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9460{
9461 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9462
9463 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
9464 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
9465 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
9466}
9467
9468
9469/**
9470 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
9471 * Unconditional VM-exit.
9472 */
9473HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9474{
9475 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9476
9477 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9478 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9479 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
9480 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
9481
9482 VMXVEXITINFO ExitInfo;
9483 RT_ZERO(ExitInfo);
9484 ExitInfo.uReason = pVmxTransient->uExitReason;
9485 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9486 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9487
9488 VMXVEXITEVENTINFO ExitEventInfo;
9489 RT_ZERO(ExitEventInfo);
9490 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
9491 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
9492 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
9493}
9494
9495
9496/**
9497 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
9498 */
9499HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9500{
9501 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9502
9503 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
9504 {
9505 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9506 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9507 }
9508 return vmxHCExitHlt(pVCpu, pVmxTransient);
9509}
9510
9511
9512/**
9513 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
9514 */
9515HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9516{
9517 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9518
9519 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
9520 {
9521 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9522 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9523
9524 VMXVEXITINFO ExitInfo;
9525 RT_ZERO(ExitInfo);
9526 ExitInfo.uReason = pVmxTransient->uExitReason;
9527 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9528 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9529 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9530 }
9531 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
9532}
9533
9534
9535/**
9536 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9537 */
9538HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9539{
9540 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9541
9542 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
9543 {
9544 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9545 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9546 }
9547 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
9548}
9549
9550
9551/**
9552 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
9553 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
9554 */
9555HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9556{
9557 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9558
9559 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
9560 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
9561
9562 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9563
9564 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
9565 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9566 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9567
9568 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
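    /* Outside 64-bit mode only the lower 32 bits of the register operand specify the VMCS field. */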
9569 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
9570 u64VmcsField &= UINT64_C(0xffffffff);
9571
9572 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
9573 {
9574 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9575 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9576
9577 VMXVEXITINFO ExitInfo;
9578 RT_ZERO(ExitInfo);
9579 ExitInfo.uReason = pVmxTransient->uExitReason;
9580 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9581 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9582 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9583 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9584 }
9585
9586 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
9587 return vmxHCExitVmread(pVCpu, pVmxTransient);
9588 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
9589}
9590
9591
9592/**
9593 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9594 */
9595HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9596{
9597 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9598
9599 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
9600 {
9601 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9602 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9603 }
9604
9605 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
9606}
9607
9608
9609/**
9610 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
9611 * Conditional VM-exit.
9612 */
9613HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9614{
9615 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9616
9617 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9618 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9619
9620 VBOXSTRICTRC rcStrict;
9621 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
9622 switch (uAccessType)
9623 {
9624 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
9625 {
9626 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9627 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9628 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
9629 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
9630
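            /* Determine whether the nested-hypervisor intercepts this MOV-to-CRx: CR0/CR4 depend on
               the guest/host masks and read shadows, CR3 on the CR3-load exiting control and the
               CR3-target list, and CR8 on the CR8-load exiting control. */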
9631 bool fIntercept;
9632 switch (iCrReg)
9633 {
9634 case 0:
9635 case 4:
9636 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
9637 break;
9638
9639 case 3:
9640 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
9641 break;
9642
9643 case 8:
9644 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
9645 break;
9646
9647 default:
9648 fIntercept = false;
9649 break;
9650 }
9651 if (fIntercept)
9652 {
9653 VMXVEXITINFO ExitInfo;
9654 RT_ZERO(ExitInfo);
9655 ExitInfo.uReason = pVmxTransient->uExitReason;
9656 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9657 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9658 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9659 }
9660 else
9661 {
9662 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
9663 AssertRCReturn(rc, rc);
9664 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9665 }
9666 break;
9667 }
9668
9669 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
9670 {
9671 /*
9672 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
9673 * CR2 reads do not cause a VM-exit.
9674 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
9675 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
9676 */
9677 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
9678 if ( iCrReg == 3
9679 || iCrReg == 8)
9680 {
9681 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
9682 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
9683 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
9684 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
9685 {
9686 VMXVEXITINFO ExitInfo;
9687 RT_ZERO(ExitInfo);
9688 ExitInfo.uReason = pVmxTransient->uExitReason;
9689 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9690 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9691 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9692 }
9693 else
9694 {
9695 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
9696 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
9697 }
9698 }
9699 else
9700 {
9701 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
9702 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
9703 }
9704 break;
9705 }
9706
9707 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
9708 {
9709 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
9710 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
9711 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
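            /* CLTS is only intercepted by the nested-hypervisor when it owns CR0.TS via the guest/host
               mask and the CR0 read shadow has TS set; otherwise we emulate the CLTS ourselves. */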
9712 if ( (uGstHostMask & X86_CR0_TS)
9713 && (uReadShadow & X86_CR0_TS))
9714 {
9715 VMXVEXITINFO ExitInfo;
9716 RT_ZERO(ExitInfo);
9717 ExitInfo.uReason = pVmxTransient->uExitReason;
9718 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9719 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9720 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9721 }
9722 else
9723 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
9724 break;
9725 }
9726
9727 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9728 {
9729 RTGCPTR GCPtrEffDst;
9730 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
9731 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
9732 if (fMemOperand)
9733 {
9734 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9735 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
9736 }
9737 else
9738 GCPtrEffDst = NIL_RTGCPTR;
9739
9740 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
9741 {
9742 VMXVEXITINFO ExitInfo;
9743 RT_ZERO(ExitInfo);
9744 ExitInfo.uReason = pVmxTransient->uExitReason;
9745 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9746 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
9747 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9748 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9749 }
9750 else
9751 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
9752 break;
9753 }
9754
9755 default:
9756 {
9757 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
9758 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
9759 }
9760 }
9761
9762 if (rcStrict == VINF_IEM_RAISED_XCPT)
9763 {
9764 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
9765 rcStrict = VINF_SUCCESS;
9766 }
9767 return rcStrict;
9768}
9769
9770
9771/**
9772 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
9773 * Conditional VM-exit.
9774 */
9775HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9776{
9777 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9778
9779 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
9780 {
9781 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9782 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9783
9784 VMXVEXITINFO ExitInfo;
9785 RT_ZERO(ExitInfo);
9786 ExitInfo.uReason = pVmxTransient->uExitReason;
9787 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9788 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9789 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9790 }
9791 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
9792}
9793
9794
9795/**
9796 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
9797 * Conditional VM-exit.
9798 */
9799HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9800{
9801 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9802
9803 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9804
9805 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
9806 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
9807 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
9808
9809 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
9810 uint8_t const cbAccess = s_aIOSizes[uIOSize];
9811 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
9812 {
9813 /*
9814 * IN/OUT instruction:
9815 * - Provides VM-exit instruction length.
9816 *
9817 * INS/OUTS instruction:
9818 * - Provides VM-exit instruction length.
9819 * - Provides Guest-linear address.
9820 * - Optionally provides VM-exit instruction info (depends on CPU feature).
9821 */
9822 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9823 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9824
9825         /* Make sure we don't use stale/uninitialized VMX-transient info below. */
9826 pVmxTransient->ExitInstrInfo.u = 0;
9827 pVmxTransient->uGuestLinearAddr = 0;
9828
9829 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
9830 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
9831 if (fIOString)
9832 {
9833 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
9834 if (fVmxInsOutsInfo)
9835 {
9836 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
9837 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
9838 }
9839 }
9840
9841 VMXVEXITINFO ExitInfo;
9842 RT_ZERO(ExitInfo);
9843 ExitInfo.uReason = pVmxTransient->uExitReason;
9844 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
9845 ExitInfo.u64Qual = pVmxTransient->uExitQual;
9846 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
9847 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
9848 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
9849 }
9850 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
9851}
9852
9853
9854/**
9855 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9856 */
9857HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9858{
9859 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9860
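    /* If the nested-hypervisor uses MSR bitmaps, consult its bitmap for the MSR in ECX;
       otherwise every RDMSR executed by the nested-guest causes a VM-exit. */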
9861 uint32_t fMsrpm;
9862 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9863 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9864 else
9865 fMsrpm = VMXMSRPM_EXIT_RD;
9866
9867 if (fMsrpm & VMXMSRPM_EXIT_RD)
9868 {
9869 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9870 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9871 }
9872 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
9873}
9874
9875
9876/**
9877 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9878 */
9879HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9880{
9881 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9882
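    /* Likewise for writes: consult the nested-hypervisor's MSR bitmap if it uses one;
       otherwise every WRMSR executed by the nested-guest causes a VM-exit. */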
9883 uint32_t fMsrpm;
9884 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
9885 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
9886 else
9887 fMsrpm = VMXMSRPM_EXIT_WR;
9888
9889 if (fMsrpm & VMXMSRPM_EXIT_WR)
9890 {
9891 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9892 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9893 }
9894 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
9895}
9896
9897
9898/**
9899 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
9900 */
9901HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9902{
9903 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9904
9905 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
9906 {
9907 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9908 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9909 }
9910 return vmxHCExitMwait(pVCpu, pVmxTransient);
9911}
9912
9913
9914/**
9915 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
9916 * VM-exit.
9917 */
9918HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9919{
9920 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9921
9922 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
9923 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9924 VMXVEXITINFO ExitInfo;
9925 RT_ZERO(ExitInfo);
9926 ExitInfo.uReason = pVmxTransient->uExitReason;
9927 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9928 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9929}
9930
9931
9932/**
9933 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
9934 */
9935HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9936{
9937 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9938
9939 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
9940 {
9941 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9942 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9943 }
9944 return vmxHCExitMonitor(pVCpu, pVmxTransient);
9945}
9946
9947
9948/**
9949 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9950 */
9951HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9952{
9953 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9954
9955 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
9956 * PAUSE when executing a nested-guest? If it does not, we would not need
9957 * to check for the intercepts here. Just call VM-exit... */
9958
9959 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
9960 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
9961 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
9962 {
9963 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9964 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
9965 }
9966 return vmxHCExitPause(pVCpu, pVmxTransient);
9967}
9968
9969
9970/**
9971 * Nested-guest VM-exit handler for when the TPR value is lowered below the
9972 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9973 */
9974HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9975{
9976 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9977
9978 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
9979 {
9980 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
9981 VMXVEXITINFO ExitInfo;
9982 RT_ZERO(ExitInfo);
9983 ExitInfo.uReason = pVmxTransient->uExitReason;
9984 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
9985 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
9986 }
9987 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
9988}
9989
9990
9991/**
9992 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
9993 * VM-exit.
9994 */
9995HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9996{
9997 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
9998
9999 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10000 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10001 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10002 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10003
10004 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
10005
10006 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
10007 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
10008
10009 VMXVEXITINFO ExitInfo;
10010 RT_ZERO(ExitInfo);
10011 ExitInfo.uReason = pVmxTransient->uExitReason;
10012 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10013 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10014
10015 VMXVEXITEVENTINFO ExitEventInfo;
10016 RT_ZERO(ExitEventInfo);
10017 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10018 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10019 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
10020}
10021
10022
10023/**
10024 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
10025 * Conditional VM-exit.
10026 */
10027HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10028{
10029 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10030
10031 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
10032 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10033 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10034}
10035
10036
10037/**
10038 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
10039 * Conditional VM-exit.
10040 */
10041HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10042{
10043 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10044
10045 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
10046 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10047 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
10048}
10049
10050
10051/**
10052 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10053 */
10054HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10055{
10056 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10057
10058 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
10059 {
10060 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
10061 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10062 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10063 }
10064 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
10065}
10066
10067
10068/**
10069 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10070 */
10071HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10072{
10073 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10074
10075 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
10076 {
10077 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10078 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10079 }
10080 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
10081}
10082
10083
10084/**
10085 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10086 */
10087HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10088{
10089 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10090
10091 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
10092 {
10093 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
10094 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10095 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10096 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10097
10098 VMXVEXITINFO ExitInfo;
10099 RT_ZERO(ExitInfo);
10100 ExitInfo.uReason = pVmxTransient->uExitReason;
10101 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10102 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10103 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10104 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10105 }
10106 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
10107}
10108
10109
10110/**
10111 * Nested-guest VM-exit handler for invalid-guest state
10112 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
10113 */
10114HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10115{
10116 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10117
10118 /*
10119 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
10120 * So if it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
10121 * Handle it as if the invalid guest state originated in the outer guest.
10122 *
10123 * When the fast path is implemented, this should be changed to cause the corresponding
10124 * nested-guest VM-exit.
10125 */
10126 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
10127}
10128
10129
10130/**
10131 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
10132 * and only provide the instruction length.
10133 *
10134 * Unconditional VM-exit.
10135 */
10136HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10137{
10138 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10139
10140#ifdef VBOX_STRICT
10141 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10142 switch (pVmxTransient->uExitReason)
10143 {
10144 case VMX_EXIT_ENCLS:
10145 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
10146 break;
10147
10148 case VMX_EXIT_VMFUNC:
10149 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
10150 break;
10151 }
10152#endif
10153
10154 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10155 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
10156}
10157
10158
10159/**
10160 * Nested-guest VM-exit handler for instructions that provide the instruction length
10161 * as well as additional information.
10162 *
10163 * Unconditional VM-exit.
10164 */
10165HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10166{
10167 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10168
10169#ifdef VBOX_STRICT
10170 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10171 switch (pVmxTransient->uExitReason)
10172 {
10173 case VMX_EXIT_GDTR_IDTR_ACCESS:
10174 case VMX_EXIT_LDTR_TR_ACCESS:
10175 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
10176 break;
10177
10178 case VMX_EXIT_RDRAND:
10179 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
10180 break;
10181
10182 case VMX_EXIT_RDSEED:
10183 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
10184 break;
10185
10186 case VMX_EXIT_XSAVES:
10187 case VMX_EXIT_XRSTORS:
10188 /** @todo NSTVMX: Verify XSS-bitmap. */
10189 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
10190 break;
10191
10192 case VMX_EXIT_UMWAIT:
10193 case VMX_EXIT_TPAUSE:
10194 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
10195 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
10196 break;
10197
10198 case VMX_EXIT_LOADIWKEY:
10199 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
10200 break;
10201 }
10202#endif
10203
10204 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10205 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10206 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10207
10208 VMXVEXITINFO ExitInfo;
10209 RT_ZERO(ExitInfo);
10210 ExitInfo.uReason = pVmxTransient->uExitReason;
10211 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10212 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10213 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
10214 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
10215}
10216
10217
10218# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
10219/**
10220 * Nested-guest VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION).
10221 * Conditional VM-exit.
10222 */
10223HMVMX_EXIT_DECL vmxHCExitEptViolationNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10224{
10225 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10226 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10227
10228 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10229 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10230 AssertRCReturn(rc, rc);
10231
10232 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10233 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10234 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10235
10236 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10237 uint64_t const uExitQual = pVmxTransient->uExitQual;
10238
10239 RTGCPTR GCPtrNested;
10240 bool const fIsLinearAddrValid = RT_BOOL(uExitQual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID);
10241 if (fIsLinearAddrValid)
10242 {
10243 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
10244 GCPtrNested = pVmxTransient->uGuestLinearAddr;
10245 }
10246 else
10247 GCPtrNested = 0;
10248
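     /* Synthesize a #PF-style error code from the EPT violation exit qualification
        for the nested-paging handler below: an instruction fetch maps to the ID bit,
        a write access to the RW bit, and any access right granted by the EPT entry
        (read/write/execute) means the guest-physical translation was present. */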
10249 RTGCUINT const uErr = ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH) ? X86_TRAP_PF_ID : 0)
10250 | ((uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE) ? X86_TRAP_PF_RW : 0)
10251 | ((uExitQual & ( VMX_EXIT_QUAL_EPT_ENTRY_READ
10252 | VMX_EXIT_QUAL_EPT_ENTRY_WRITE
10253 | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE)) ? X86_TRAP_PF_P : 0);
10254
10255 PGMPTWALK Walk;
10256 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10257 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, uErr, CPUMCTX2CORE(pCtx), GCPhysNested,
10258 fIsLinearAddrValid, GCPtrNested, &Walk);
10259 if (RT_SUCCESS(rcStrict))
10260 {
10261 if (rcStrict == VINF_SUCCESS)
10262 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10263 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10264 {
10265 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10266 rcStrict = VINF_SUCCESS;
10267 }
10268 return rcStrict;
10269 }
10270
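     /* PGM could not handle the nested-guest physical access itself, so reflect the
        appropriate VM-exit (EPT violation or EPT misconfig, see below) to the nested
        guest, forwarding any IDT-vectoring event information. */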
10271 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10272 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10273
10274 VMXVEXITEVENTINFO ExitEventInfo;
10275 RT_ZERO(ExitEventInfo);
10276 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10277 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10278
10279 if (Walk.fFailed & PGM_WALKFAIL_EPT_VIOLATION)
10280 {
10281 VMXVEXITINFO ExitInfo;
10282 RT_ZERO(ExitInfo);
10283 ExitInfo.uReason = VMX_EXIT_EPT_VIOLATION;
10284 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
10285 ExitInfo.u64Qual = pVmxTransient->uExitQual;
10286 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
10287 ExitInfo.u64GuestPhysAddr = pVmxTransient->uGuestPhysicalAddr;
10288 return IEMExecVmxVmexitEptViolation(pVCpu, &ExitInfo, &ExitEventInfo);
10289 }
10290
10291 Assert(Walk.fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
10292 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10293}
10294
10295
10296/**
10297 * Nested-guest VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
10298 * Conditional VM-exit.
10299 */
10300HMVMX_EXIT_DECL vmxHCExitEptMisconfigNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10301{
10302 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10303 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
10304
10305 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10306 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10307 AssertRCReturn(rc, rc);
10308
10309 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
10310
10311 PGMPTWALK Walk;
10312 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10313 RTGCPHYS const GCPhysNested = pVmxTransient->uGuestPhysicalAddr;
10314 VBOXSTRICTRC rcStrict = PGMR0NestedTrap0eHandlerNestedPaging(pVCpu, PGMMODE_EPT, X86_TRAP_PF_RSVD, CPUMCTX2CORE(pCtx),
10315 GCPhysNested, false /* fIsLinearAddrValid */,
10316 0 /* GCPtrNested*/, &Walk);
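     /* If PGM can resolve the nested-guest physical access itself we let the
        instruction be emulated; otherwise the EPT misconfiguration is reflected
        to the nested guest below. */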
10317 if (RT_SUCCESS(rcStrict))
10318 return VINF_EM_RAW_EMULATE_INSTR;
10319
10320 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
10321 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
10322
10323 VMXVEXITEVENTINFO ExitEventInfo;
10324 RT_ZERO(ExitEventInfo);
10325 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
10326 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
10327
10328 return IEMExecVmxVmexitEptMisconfig(pVCpu, pVmxTransient->uGuestPhysicalAddr, &ExitEventInfo);
10329}
10330# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
10331
10332/** @} */
10333#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10334
10335
10336/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10337 * probes.
10338 *
10339 * The following few functions and the associated structure contain the bloat
10340 * necessary for providing detailed debug events and dtrace probes as well as
10341 * reliable host-side single stepping. This works on the principle of
10342 * "subclassing" the normal execution loop and workers. We replace the loop
10343 * method completely and override selected helpers to add necessary adjustments
10344 * to their core operation.
10345 *
10346 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10347 * any performance for debug and analysis features.
10348 *
10349 * @{
10350 */
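/*
 * Illustrative only: a minimal sketch (with a hypothetical function name) of how
 * the helpers below are intended to compose around the inner run loop, going by
 * their descriptions in this file. The real debug loop additionally handles the
 * VM-entry/VM-exit plumbing, pending events, dtrace settings changes, statistics
 * and error paths.
 */
#if 0 /* not compiled, for orientation only */
static VBOXSTRICTRC vmxHCRunDebugLoopSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    VMXRUNDBGSTATE DbgState;
    vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);            /* Record initial VMCS control values. */
    vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);  /* Derive wanted exits from DBGF/dtrace. */

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (;;)
    {
        /* Push the extra proc-controls / exception-bitmap bits into the VMCS
           immediately before executing guest code. */
        vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);

        /* ... run the guest and read back the basic VM-exit information ... */

        /* Filter the VM-exit: fire DBGF/dtrace events and detect completed single steps. */
        rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState);
        if (rcStrict != VINF_SUCCESS)
            break;
    }

    /* Undo our VMCS modifications so the normal (non-debug) loop can take over again. */
    return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
}
#endif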
10351
10352/**
10353 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
10354 * in the debug run loop.
10355 */
10356typedef struct VMXRUNDBGSTATE
10357{
10358 /** The RIP we started executing at. This is for detecting that we stepped. */
10359 uint64_t uRipStart;
10360 /** The CS we started executing with. */
10361 uint16_t uCsStart;
10362
10363 /** Whether we've actually modified the 1st execution control field. */
10364 bool fModifiedProcCtls : 1;
10365 /** Whether we've actually modified the 2nd execution control field. */
10366 bool fModifiedProcCtls2 : 1;
10367 /** Whether we've actually modified the exception bitmap. */
10368 bool fModifiedXcptBitmap : 1;
10369
10370 /** Whether we want the CR0 guest/host mask to be cleared. */
10371 bool fClearCr0Mask : 1;
10372 /** Whether we want the CR4 guest/host mask to be cleared. */
10373 bool fClearCr4Mask : 1;
10374 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10375 uint32_t fCpe1Extra;
10376 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10377 uint32_t fCpe1Unwanted;
10378 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10379 uint32_t fCpe2Extra;
10380 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10381 uint32_t bmXcptExtra;
10382 /** The sequence number of the Dtrace provider settings the state was
10383 * configured against. */
10384 uint32_t uDtraceSettingsSeqNo;
10385 /** VM-exits to check (one bit per VM-exit). */
10386 uint32_t bmExitsToCheck[3];
10387
10388 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10389 uint32_t fProcCtlsInitial;
10390 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10391 uint32_t fProcCtls2Initial;
10392 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10393 uint32_t bmXcptInitial;
10394} VMXRUNDBGSTATE;
10395AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
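/* One bit per VM-exit reason: the three 32-bit words give 96 bits, and the assertion
   above checks that this matches what VMX_EXIT_MAX + 1 reasons require when rounded
   up to whole words. */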
10396typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10397
10398
10399/**
10400 * Initializes the VMXRUNDBGSTATE structure.
10401 *
10402 * @param pVCpu The cross context virtual CPU structure of the
10403 * calling EMT.
10404 * @param pVmxTransient The VMX-transient structure.
10405 * @param pDbgState The debug state to initialize.
10406 */
10407static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10408{
10409 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10410 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10411
10412 pDbgState->fModifiedProcCtls = false;
10413 pDbgState->fModifiedProcCtls2 = false;
10414 pDbgState->fModifiedXcptBitmap = false;
10415 pDbgState->fClearCr0Mask = false;
10416 pDbgState->fClearCr4Mask = false;
10417 pDbgState->fCpe1Extra = 0;
10418 pDbgState->fCpe1Unwanted = 0;
10419 pDbgState->fCpe2Extra = 0;
10420 pDbgState->bmXcptExtra = 0;
10421 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
10422 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
10423 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
10424}
10425
10426
10427/**
10428 * Updates the VMCS fields with changes requested by @a pDbgState.
10429 *
10430 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
10431 * immediately before executing guest code, i.e. when interrupts are disabled.
10432 * We don't check status codes here as we cannot easily assert or return in the
10433 * latter case.
10434 *
10435 * @param pVCpu The cross context virtual CPU structure.
10436 * @param pVmxTransient The VMX-transient structure.
10437 * @param pDbgState The debug state.
10438 */
10439static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10440{
10441 /*
10442 * Ensure desired flags in VMCS control fields are set.
10443 * (Ignoring write failure here, as we're committed and it's just debug extras.)
10444 *
10445 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
10446 * there should be no stale data in pCtx at this point.
10447 */
10448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10449 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
10450 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
10451 {
10452 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
10453 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
10454 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
10455 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
10456 pDbgState->fModifiedProcCtls = true;
10457 }
10458
10459 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
10460 {
10461 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
10462 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
10463 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
10464 pDbgState->fModifiedProcCtls2 = true;
10465 }
10466
10467 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
10468 {
10469 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
10470 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
10471 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
10472 pDbgState->fModifiedXcptBitmap = true;
10473 }
10474
10475 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
10476 {
10477 pVmcsInfo->u64Cr0Mask = 0;
10478 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
10479 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
10480 }
10481
10482 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
10483 {
10484 pVmcsInfo->u64Cr4Mask = 0;
10485 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
10486 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
10487 }
10488
10489 NOREF(pVCpu);
10490}
10491
10492
10493/**
10494 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
10495 * re-entry next time around.
10496 *
10497 * @returns Strict VBox status code (i.e. informational status codes too).
10498 * @param pVCpu The cross context virtual CPU structure.
10499 * @param pVmxTransient The VMX-transient structure.
10500 * @param pDbgState The debug state.
10501 * @param rcStrict The return code from executing the guest using single
10502 * stepping.
10503 */
10504static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
10505 VBOXSTRICTRC rcStrict)
10506{
10507 /*
10508 * Restore VM-exit control settings as we may not reenter this function the
10509 * next time around.
10510 */
10511 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10512
10513 /* We reload the initial value and trigger whatever recalculations we can the
10514 next time around. From the looks of things, that's all that's required atm. */
10515 if (pDbgState->fModifiedProcCtls)
10516 {
10517 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
10518 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
10519 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
10520 AssertRC(rc2);
10521 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
10522 }
10523
10524 /* We're currently the only ones messing with this one, so just restore the
10525 cached value and reload the field. */
10526 if ( pDbgState->fModifiedProcCtls2
10527 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
10528 {
10529 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
10530 AssertRC(rc2);
10531 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
10532 }
10533
10534 /* If we've modified the exception bitmap, we restore it and trigger
10535 reloading and partial recalculation the next time around. */
10536 if (pDbgState->fModifiedXcptBitmap)
10537 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
10538
10539 return rcStrict;
10540}
10541
10542
10543/**
10544 * Configures VM-exit controls for current DBGF and DTrace settings.
10545 *
10546 * This updates @a pDbgState and the VMCS execution control fields to reflect
10547 * the necessary VM-exits demanded by DBGF and DTrace.
10548 *
10549 * @param pVCpu The cross context virtual CPU structure.
10550 * @param pVmxTransient The VMX-transient structure. May update
10551 * fUpdatedTscOffsettingAndPreemptTimer.
10552 * @param pDbgState The debug state.
10553 */
10554static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10555{
10556#ifndef IN_NEM_DARWIN
10557 /*
10558 * Take down the dtrace serial number so we can spot changes.
10559 */
10560 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
10561 ASMCompilerBarrier();
10562#endif
10563
10564 /*
10565 * We'll rebuild most of the middle block of data members (holding the
10566 * current settings) as we go along here, so start by clearing it all.
10567 */
10568 pDbgState->bmXcptExtra = 0;
10569 pDbgState->fCpe1Extra = 0;
10570 pDbgState->fCpe1Unwanted = 0;
10571 pDbgState->fCpe2Extra = 0;
10572 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
10573 pDbgState->bmExitsToCheck[i] = 0;
10574
10575 /*
10576 * Software interrupts (INT XXh) - no idea how to trigger these...
10577 */
10578 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10579 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
10580 || VBOXVMM_INT_SOFTWARE_ENABLED())
10581 {
10582 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10583 }
10584
10585 /*
10586 * INT3 breakpoints - triggered by #BP exceptions.
10587 */
10588 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
10589 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10590
10591 /*
10592 * Exception bitmap and XCPT events+probes.
10593 */
10594 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
10595 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
10596 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
10597
10598 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
10599 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
10600 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
10601 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
10602 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
10603 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
10604 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
10605 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
10606 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
10607 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
10608 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
10609 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
10610 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
10611 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
10612 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
10613 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
10614 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
10615 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
10616
10617 if (pDbgState->bmXcptExtra)
10618 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
10619
10620 /*
10621 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
10622 *
10623 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
10624 * So, when adding/changing/removing please don't forget to update it.
10625 *
10626 * Some of the macros pick up local variables to save horizontal space
10627 * (being able to see it in a table is the lesser evil here).
10628 */
10629#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
10630 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
10631 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
10632#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
10633 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10634 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10635 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10636 } else do { } while (0)
10637#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
10638 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10639 { \
10640 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
10641 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10642 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10643 } else do { } while (0)
10644#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
10645 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10646 { \
10647 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
10648 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10649 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10650 } else do { } while (0)
10651#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
10652 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
10653 { \
10654 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
10655 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
10656 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
10657 } else do { } while (0)
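    /* Illustrative only: modulo the AssertCompile, the HALT row further down, i.e.
       SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT),
       expands to roughly the following (the other macro flavours differ only in
       which extra control field, if any, they request): */
#if 0
    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
        || VBOXVMM_INSTR_HALT_ENABLED())
    {
        pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
    }
#endif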
10658
10659 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
10660 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
10661 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
10662 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
10663 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
10664
10665 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
10666 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
10667 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
10668 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
10669 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
10670 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
10671 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
10672 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
10673 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
10674 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
10675 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
10676 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
10677 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
10678 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
10679 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
10680 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
10681 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
10682 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
10683 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
10684 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
10685 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
10686 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
10687 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
10688 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
10689 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
10690 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
10691 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
10692 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
10693 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
10694 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
10695 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
10696 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
10697 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
10698 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
10699 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
10700 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
10701
10702 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
10703 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10704 {
10705 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
10706 | CPUMCTX_EXTRN_APIC_TPR);
10707 AssertRC(rc);
10708
10709#if 0 /** @todo fix me */
10710 pDbgState->fClearCr0Mask = true;
10711 pDbgState->fClearCr4Mask = true;
10712#endif
10713 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
10714 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
10715 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
10716 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10717 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
10718 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
10719 require clearing here and in the loop if we start using it. */
10720 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
10721 }
10722 else
10723 {
10724 if (pDbgState->fClearCr0Mask)
10725 {
10726 pDbgState->fClearCr0Mask = false;
10727 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
10728 }
10729 if (pDbgState->fClearCr4Mask)
10730 {
10731 pDbgState->fClearCr4Mask = false;
10732 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
10733 }
10734 }
10735 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
10736 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
10737
10738 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
10739 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
10740 {
10741 /** @todo later, need to fix handler as it assumes this won't usually happen. */
10742 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
10743 }
10744 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
10745 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
10746
10747 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
10748 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
10749 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
10750 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
10751 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
10752 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
10753 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
10754 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
10755#if 0 /** @todo too slow, fix handler. */
10756 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
10757#endif
10758 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
10759
10760 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
10761 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
10762 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
10763 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
10764 {
10765 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10766 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
10767 }
10768 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10769 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10770 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10771 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
10772
10773 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
10774 || IS_EITHER_ENABLED(pVM, INSTR_STR)
10775 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
10776 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
10777 {
10778 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
10779 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
10780 }
10781 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
10782 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
10783 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
10784 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
10785
10786 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
10787 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
10788 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
10789 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
10790 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
10791 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
10792 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
10793 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
10794 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
10795 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
10796 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
10797 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
10798 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
10799 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
10800 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
10801 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
10802 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
10803 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
10804 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
10805 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
10806 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
10807 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
10808
10809#undef IS_EITHER_ENABLED
10810#undef SET_ONLY_XBM_IF_EITHER_EN
10811#undef SET_CPE1_XBM_IF_EITHER_EN
10812#undef SET_CPEU_XBM_IF_EITHER_EN
10813#undef SET_CPE2_XBM_IF_EITHER_EN
10814
10815 /*
10816 * Sanitize the control stuff.
10817 */
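    /* I.e. keep only the bits the CPU allows to be set (allowed-1 settings) and drop
       "unwanted" bits the CPU forces to 1 (allowed-0 settings), as we cannot clear
       those anyway. */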
10818 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
10819 if (pDbgState->fCpe2Extra)
10820 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
10821 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
10822 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
10823#ifndef IN_NEM_DARWIN
10824 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10825 {
10826 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
10827 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10828 }
10829#else
10830 if (pVCpu->nem.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
10831 {
10832 pVCpu->nem.s.fDebugWantRdTscExit ^= true;
10833 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
10834 }
10835#endif
10836
10837 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
10838 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
10839 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
10840 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
10841}
10842
10843
10844/**
10845 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
10846 * appropriate.
10847 *
10848 * The caller has already checked the VM-exit against the
10849 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs, so we
10850 * don't have to do either here.
10851 *
10852 * @returns Strict VBox status code (i.e. informational status codes too).
10853 * @param pVCpu The cross context virtual CPU structure.
10854 * @param pVmxTransient The VMX-transient structure.
10855 * @param uExitReason The VM-exit reason.
10856 *
10857 * @remarks The name of this function is displayed by dtrace, so keep it short
10858 * and to the point. No longer than 33 chars, please.
10859 */
10860static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
10861{
10862 /*
10863 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
10864 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
10865 *
10866 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
10867 * does. Must add/change/remove in both places. Same ordering, please.
10868 *
10869 * Added/removed events must also be reflected in the next section
10870 * where we dispatch dtrace events.
10871 */
10872 bool fDtrace1 = false;
10873 bool fDtrace2 = false;
10874 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
10875 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
10876 uint32_t uEventArg = 0;
10877#define SET_EXIT(a_EventSubName) \
10878 do { \
10879 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10880 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10881 } while (0)
10882#define SET_BOTH(a_EventSubName) \
10883 do { \
10884 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
10885 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
10886 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
10887 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
10888 } while (0)
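    /* Illustrative only: SET_BOTH(CPUID) expands to
           enmEvent1 = DBGFEVENT_INSTR_CPUID;  fDtrace1 = VBOXVMM_INSTR_CPUID_ENABLED();
           enmEvent2 = DBGFEVENT_EXIT_CPUID;   fDtrace2 = VBOXVMM_EXIT_CPUID_ENABLED();
       i.e. each case records both the instruction-level and the exit-level event. */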
10889 switch (uExitReason)
10890 {
10891 case VMX_EXIT_MTF:
10892 return vmxHCExitMtf(pVCpu, pVmxTransient);
10893
10894 case VMX_EXIT_XCPT_OR_NMI:
10895 {
10896 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
10897 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
10898 {
10899 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
10900 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
10901 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
10902 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
10903 {
10904 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
10905 {
10906 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
10907 uEventArg = pVmxTransient->uExitIntErrorCode;
10908 }
10909 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
10910 switch (enmEvent1)
10911 {
10912 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
10913 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
10914 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
10915 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
10916 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
10917 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
10918 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
10919 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
10920 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
10921 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
10922 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
10923 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
10924 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
10925 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
10926 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
10927 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
10928 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
10929 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
10930 default: break;
10931 }
10932 }
10933 else
10934 AssertFailed();
10935 break;
10936
10937 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
10938 uEventArg = idxVector;
10939 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
10940 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
10941 break;
10942 }
10943 break;
10944 }
10945
10946 case VMX_EXIT_TRIPLE_FAULT:
10947 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
10948 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
10949 break;
10950 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
10951 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
10952 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
10953 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
10954 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
10955
10956 /* Instruction specific VM-exits: */
10957 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
10958 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
10959 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
10960 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
10961 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
10962 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
10963 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
10964 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
10965 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
10966 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
10967 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
10968 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
10969 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
10970 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
10971 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
10972 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
10973 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
10974 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
10975 case VMX_EXIT_MOV_CRX:
10976 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10977 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
10978 SET_BOTH(CRX_READ);
10979 else
10980 SET_BOTH(CRX_WRITE);
10981 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
10982 break;
10983 case VMX_EXIT_MOV_DRX:
10984 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10985 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
10986 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
10987 SET_BOTH(DRX_READ);
10988 else
10989 SET_BOTH(DRX_WRITE);
10990 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
10991 break;
10992 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
10993 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
10994 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
10995 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
10996 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
10997 case VMX_EXIT_GDTR_IDTR_ACCESS:
10998 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
10999 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11000 {
11001 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11002 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11003 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11004 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11005 }
11006 break;
11007
11008 case VMX_EXIT_LDTR_TR_ACCESS:
11009 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11010 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11011 {
11012 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11013 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11014 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11015 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11016 }
11017 break;
11018
11019 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11020 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11021 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11022 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11023 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11024 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11025 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11026 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11027 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11028 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11029 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11030
11031 /* Events that aren't relevant at this point. */
11032 case VMX_EXIT_EXT_INT:
11033 case VMX_EXIT_INT_WINDOW:
11034 case VMX_EXIT_NMI_WINDOW:
11035 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11036 case VMX_EXIT_PREEMPT_TIMER:
11037 case VMX_EXIT_IO_INSTR:
11038 break;
11039
11040 /* Errors and unexpected events. */
11041 case VMX_EXIT_INIT_SIGNAL:
11042 case VMX_EXIT_SIPI:
11043 case VMX_EXIT_IO_SMI:
11044 case VMX_EXIT_SMI:
11045 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11046 case VMX_EXIT_ERR_MSR_LOAD:
11047 case VMX_EXIT_ERR_MACHINE_CHECK:
11048 case VMX_EXIT_PML_FULL:
11049 case VMX_EXIT_VIRTUALIZED_EOI:
11050 break;
11051
11052 default:
11053 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11054 break;
11055 }
11056#undef SET_BOTH
11057#undef SET_EXIT
11058
11059 /*
11060 * Dtrace tracepoints go first. We do them all here at once so we don't
11061 * have to duplicate the guest-state saving and related code a few dozen times.
11062 * The downside is that we've got to repeat the switch, though this time
11063 * we use enmEvent since the probes are a subset of what DBGF does.
11064 */
11065 if (fDtrace1 || fDtrace2)
11066 {
11067 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11068 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11069 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11070 switch (enmEvent1)
11071 {
11072 /** @todo consider which extra parameters would be helpful for each probe. */
11073 case DBGFEVENT_END: break;
11074 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11075 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11076 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11077 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11078 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11079 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11080 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11081 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11082 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11083 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11084 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11085 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11086 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11087 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11088 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11089 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11090 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11091 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11092 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11093 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11094 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11095 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11096 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11097 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11098 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11099 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11100 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11101 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11102 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11103 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11104 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11105 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11106 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11107 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11108 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11109 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11110 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11111 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11112 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11113 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11114 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11115 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11116 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11117 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11118 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11119 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11120 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11121 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11122 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11123 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11124 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11125 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11126 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11127 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11128 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11129 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11130 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11131 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11132 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11133 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11134 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11135 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11136 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11137 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11138 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11139 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11140 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11141 }
11142 switch (enmEvent2)
11143 {
11144 /** @todo consider which extra parameters would be helpful for each probe. */
11145 case DBGFEVENT_END: break;
11146 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11147 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11148 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11149 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11150 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11151 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11152 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11153 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11154 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11155 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11156 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11157 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11158 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11159 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11160 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11161 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11162 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11163 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11164 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11165 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11166 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11167 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11168 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11169 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11170 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11171 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11172 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11173 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11174 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11175 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11176 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11177 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11178 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11179 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11180 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11181 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11182 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11183 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11184 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11185 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11186 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11187 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11188 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11189 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11190 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11191 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11192 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11193 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11194 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11195 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11196 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11197 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11198 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11199 }
11200 }
11201
11202 /*
11203 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11204 * the DBGF call will do a full check).
11205 *
11206 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11207 * Note! If we have two events, we prioritize the first, i.e. the instruction
11208 * one, in order to avoid event nesting.
11209 */
11210 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11211 if ( enmEvent1 != DBGFEVENT_END
11212 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11213 {
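          /* Make sure CS and RIP are up to date before raising the event. */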
11214 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11215 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11216 if (rcStrict != VINF_SUCCESS)
11217 return rcStrict;
11218 }
11219 else if ( enmEvent2 != DBGFEVENT_END
11220 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11221 {
11222 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11223 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11224 if (rcStrict != VINF_SUCCESS)
11225 return rcStrict;
11226 }
11227
11228 return VINF_SUCCESS;
11229}
11230
11231
11232/**
11233 * Single-stepping VM-exit filtering.
11234 *
11235 * This preprocesses the VM-exit and decides whether we've gotten far enough
11236 * to return VINF_EM_DBG_STEPPED already.  If not, normal VM-exit handling is
11237 * performed.
11238 *
11239 * @returns Strict VBox status code (i.e. informational status codes too).
11240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11241 * @param pVmxTransient The VMX-transient structure.
11242 * @param pDbgState The debug state.
11243 */
11244DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11245{
11246 /*
11247 * Expensive (saves context) generic dtrace VM-exit probe.
11248 */
11249 uint32_t const uExitReason = pVmxTransient->uExitReason;
11250 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11251 { /* more likely */ }
11252 else
11253 {
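        /* The probe is attached: fetch the exit qualification and the entire guest state before firing it. */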
11254 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11255 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11256 AssertRC(rc);
11257 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11258 }
11259
11260#ifndef IN_NEM_DARWIN
11261 /*
11262 * Check for host NMI, just to get that out of the way.
11263 */
11264 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11265 { /* normally likely */ }
11266 else
11267 {
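        /* Read the interruption info; genuine host NMIs are handed to hmR0VmxExitHostNmi rather than being treated as guest exits. */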
11268 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11269 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11270 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11271 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
11272 }
11273#endif
11274
11275 /*
11276 * Check for single stepping event if we're stepping.
11277 */
11278 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
11279 {
11280 switch (uExitReason)
11281 {
11282 case VMX_EXIT_MTF:
11283 return vmxHCExitMtf(pVCpu, pVmxTransient);
11284
11285 /* Various events: */
11286 case VMX_EXIT_XCPT_OR_NMI:
11287 case VMX_EXIT_EXT_INT:
11288 case VMX_EXIT_TRIPLE_FAULT:
11289 case VMX_EXIT_INT_WINDOW:
11290 case VMX_EXIT_NMI_WINDOW:
11291 case VMX_EXIT_TASK_SWITCH:
11292 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11293 case VMX_EXIT_APIC_ACCESS:
11294 case VMX_EXIT_EPT_VIOLATION:
11295 case VMX_EXIT_EPT_MISCONFIG:
11296 case VMX_EXIT_PREEMPT_TIMER:
11297
11298 /* Instruction specific VM-exits: */
11299 case VMX_EXIT_CPUID:
11300 case VMX_EXIT_GETSEC:
11301 case VMX_EXIT_HLT:
11302 case VMX_EXIT_INVD:
11303 case VMX_EXIT_INVLPG:
11304 case VMX_EXIT_RDPMC:
11305 case VMX_EXIT_RDTSC:
11306 case VMX_EXIT_RSM:
11307 case VMX_EXIT_VMCALL:
11308 case VMX_EXIT_VMCLEAR:
11309 case VMX_EXIT_VMLAUNCH:
11310 case VMX_EXIT_VMPTRLD:
11311 case VMX_EXIT_VMPTRST:
11312 case VMX_EXIT_VMREAD:
11313 case VMX_EXIT_VMRESUME:
11314 case VMX_EXIT_VMWRITE:
11315 case VMX_EXIT_VMXOFF:
11316 case VMX_EXIT_VMXON:
11317 case VMX_EXIT_MOV_CRX:
11318 case VMX_EXIT_MOV_DRX:
11319 case VMX_EXIT_IO_INSTR:
11320 case VMX_EXIT_RDMSR:
11321 case VMX_EXIT_WRMSR:
11322 case VMX_EXIT_MWAIT:
11323 case VMX_EXIT_MONITOR:
11324 case VMX_EXIT_PAUSE:
11325 case VMX_EXIT_GDTR_IDTR_ACCESS:
11326 case VMX_EXIT_LDTR_TR_ACCESS:
11327 case VMX_EXIT_INVEPT:
11328 case VMX_EXIT_RDTSCP:
11329 case VMX_EXIT_INVVPID:
11330 case VMX_EXIT_WBINVD:
11331 case VMX_EXIT_XSETBV:
11332 case VMX_EXIT_RDRAND:
11333 case VMX_EXIT_INVPCID:
11334 case VMX_EXIT_VMFUNC:
11335 case VMX_EXIT_RDSEED:
11336 case VMX_EXIT_XSAVES:
11337 case VMX_EXIT_XRSTORS:
11338 {
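                /* Report the step as complete once RIP or CS differs from where single-stepping started. */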
11339 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11340 AssertRCReturn(rc, rc);
11341 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11342 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11343 return VINF_EM_DBG_STEPPED;
11344 break;
11345 }
11346
11347 /* Errors and unexpected events: */
11348 case VMX_EXIT_INIT_SIGNAL:
11349 case VMX_EXIT_SIPI:
11350 case VMX_EXIT_IO_SMI:
11351 case VMX_EXIT_SMI:
11352 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11353 case VMX_EXIT_ERR_MSR_LOAD:
11354 case VMX_EXIT_ERR_MACHINE_CHECK:
11355 case VMX_EXIT_PML_FULL:
11356 case VMX_EXIT_VIRTUALIZED_EOI:
11357 case VMX_EXIT_APIC_WRITE: /* Reportedly fault-like, so presumably it must be processed. */
11358 break;
11359
11360 default:
11361 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11362 break;
11363 }
11364 }
11365
11366 /*
11367 * Check for debugger event breakpoints and dtrace probes.
11368 */
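    /* bmExitsToCheck has one bit per VM-exit reason; only exits with their bit set need the (more expensive) DBGF/dtrace event processing. */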
11369 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11370 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11371 {
11372 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11373 if (rcStrict != VINF_SUCCESS)
11374 return rcStrict;
11375 }
11376
11377 /*
11378 * Normal processing.
11379 */
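    /* With HMVMX_USE_FUNCTION_TABLE the exit reason indexes straight into g_aVMExitHandlers; otherwise vmxHCHandleExit does the dispatching. */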
11380#ifdef HMVMX_USE_FUNCTION_TABLE
11381 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
11382#else
11383 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
11384#endif
11385}
11386
11387/** @} */