source: vbox/trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h@92626

Last change on this file: r92626, checked in by vboxsync on 2021-11-29:

VMM: Nested VMX: bugref:10092 Adjust PGM APIs and translate nested-guest CR3 prior to mapping them when switching mode and other places.

1/* $Id: VMXAllTemplate.cpp.h 92626 2021-11-29 12:32:58Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Code template for our own hypervisor and the NEM darwin backend using Apple's Hypervisor.framework.
4 */
5
6/*
7 * Copyright (C) 2012-2021 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#if !defined(VMX_VMCS_WRITE_16) || !defined(VMX_VMCS_WRITE_32) || !defined(VMX_VMCS_WRITE_64) || !defined(VMX_VMCS_WRITE_NW)
23# error "At least one of the VMX_VMCS_WRITE_16, VMX_VMCS_WRITE_32, VMX_VMCS_WRITE_64 or VMX_VMCS_WRITE_NW macros is missing"
24#endif
25
26
27#if !defined(VMX_VMCS_READ_16) || !defined(VMX_VMCS_READ_32) || !defined(VMX_VMCS_READ_64) || !defined(VMX_VMCS_READ_NW)
28# error "At least one of the VMX_VMCS_READ_16, VMX_VMCS_READ_32, VMX_VMCS_READ_64 or VMX_VMCS_READ_NW macros is missing"
29#endif
30
31
32/** Use the function table. */
33#define HMVMX_USE_FUNCTION_TABLE
34
35/** Determine which tagged-TLB flush handler to use. */
36#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
37#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
38#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
39#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
40
41/**
42 * Flags to skip redundant reads of some common VMCS fields that are not part of
43 * the guest-CPU or VCPU state but are needed while handling VM-exits.
44 */
45#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
46#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
47#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
48#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
49#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
50#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
51#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
52#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
53#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
54#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
55
56/** All the VMCS fields required for processing of exception/NMI VM-exits. */
57#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
58 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
59 | HMVMX_READ_EXIT_INSTR_LEN \
60 | HMVMX_READ_IDT_VECTORING_INFO \
61 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
62
63/** Assert that all the given fields have been read from the VMCS. */
64#ifdef VBOX_STRICT
65# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
66 do { \
67 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
68 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
69 } while (0)
70#else
71# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
72#endif
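
/*
 * Illustrative sketch: how the lazy-read flags and HMVMX_ASSERT_READ above are
 * meant to be used together in an exception/NMI VM-exit handler, assuming the
 * vmxHCReadXxxVmcs() helpers defined further down in this file.
 *
 *     vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);            // sets HMVMX_READ_EXIT_INTERRUPTION_INFO
 *     vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
 *     vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *     vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
 *     vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
 *
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);    // all five HMVMX_READ_XCPT_INFO bits are set
 *     uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo; // safe to consume the cached value
 */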
73
74/**
75 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
76 * guest using hardware-assisted VMX.
77 *
78 * This excludes state like GPRs (other than RSP), which are always swapped
79 * and restored across the world-switch, and registers like the EFER MSR,
80 * which cannot be modified by the guest without causing a VM-exit.
81 */
82#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
83 | CPUMCTX_EXTRN_RFLAGS \
84 | CPUMCTX_EXTRN_RSP \
85 | CPUMCTX_EXTRN_SREG_MASK \
86 | CPUMCTX_EXTRN_TABLE_MASK \
87 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
88 | CPUMCTX_EXTRN_SYSCALL_MSRS \
89 | CPUMCTX_EXTRN_SYSENTER_MSRS \
90 | CPUMCTX_EXTRN_TSC_AUX \
91 | CPUMCTX_EXTRN_OTHER_MSRS \
92 | CPUMCTX_EXTRN_CR0 \
93 | CPUMCTX_EXTRN_CR3 \
94 | CPUMCTX_EXTRN_CR4 \
95 | CPUMCTX_EXTRN_DR7 \
96 | CPUMCTX_EXTRN_HWVIRT \
97 | CPUMCTX_EXTRN_INHIBIT_INT \
98 | CPUMCTX_EXTRN_INHIBIT_NMI)
99
100/**
101 * Exception bitmap mask for real-mode guests (real-on-v86).
102 *
103 * We need to intercept all exceptions manually except:
104 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
105 * deadlocking due to bugs in Intel CPUs.
106 * - \#PF, which need not be intercepted even in real-mode if we have nested
107 * paging support.
108 */
109#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
110 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
111 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
112 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
113 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
114 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
115 | RT_BIT(X86_XCPT_XF))
116
117/** Maximum VM-instruction error number. */
118#define HMVMX_INSTR_ERROR_MAX 28
119
120/** Profiling macro. */
121#ifdef HM_PROFILE_EXIT_DISPATCH
122# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
123# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitDispatch, ed)
124#else
125# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
126# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
127#endif
128
129#ifdef IN_RING0
130/** Assert that preemption is disabled or covered by thread-context hooks. */
131# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
132 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
133
134/** Assert that we haven't migrated CPUs when thread-context hooks are not
135 * used. */
136# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
137 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
138 ("Illegal migration! Entered on CPU %u Current %u\n", \
139 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
140#else
141# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) do { } while (0)
142# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) do { } while (0)
143#endif
144
145/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
146 * context. */
147#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
148 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
149 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
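
/*
 * Illustrative sketch: a typical HMVMX_CPUMCTX_ASSERT call before touching
 * guest registers that must already have been imported from the VMCS (see
 * vmxHCImportGuestState below); the bits passed are the ones that must no
 * longer be marked external in fExtrn.
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
 *     uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;   // known to be up to date here
 */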
150
151/** Log the VM-exit reason with an easily visible marker to identify it in a
152 * potential sea of logging data. */
153#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
154 do { \
155 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
156 HMGetVmxExitName(a_uExitReason))); \
157 } while (0) \
158
159
160/*********************************************************************************************************************************
161* Structures and Typedefs *
162*********************************************************************************************************************************/
163/**
164 * Memory operand read or write access.
165 */
166typedef enum VMXMEMACCESS
167{
168 VMXMEMACCESS_READ = 0,
169 VMXMEMACCESS_WRITE = 1
170} VMXMEMACCESS;
171
172
173/**
174 * VMX VM-exit handler.
175 *
176 * @returns Strict VBox status code (i.e. informational status codes too).
177 * @param pVCpu The cross context virtual CPU structure.
178 * @param pVmxTransient The VMX-transient structure.
179 */
180#ifndef HMVMX_USE_FUNCTION_TABLE
181typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
182#else
183typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
184/** Pointer to VM-exit handler. */
185typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
186#endif
187
188/**
189 * VMX VM-exit handler, non-strict status code.
190 *
191 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
192 *
193 * @returns VBox status code, no informational status code returned.
194 * @param pVCpu The cross context virtual CPU structure.
195 * @param pVmxTransient The VMX-transient structure.
196 *
197 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
198 * use of that status code will be replaced with VINF_EM_SOMETHING
199 * later when switching over to IEM.
200 */
201#ifndef HMVMX_USE_FUNCTION_TABLE
202typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
203#else
204typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
205#endif
206
207
208/*********************************************************************************************************************************
209* Internal Functions *
210*********************************************************************************************************************************/
211#ifndef HMVMX_USE_FUNCTION_TABLE
212DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
213# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
214# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
215#else
216# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
217# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
218#endif
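
/*
 * Illustrative sketch: the general shape of a VM-exit handler declared with
 * the macros above (the real handlers are defined much further down; the body
 * here is only indicative).
 *
 *     HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *     {
 *         vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 *         // ... advance RIP past HLT, decide whether to halt or keep running ...
 *         return VINF_SUCCESS;
 *     }
 */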
219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
220DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
221#endif
222
223static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
224
225/** @name VM-exit handler prototypes.
226 * @{
227 */
228static FNVMXEXITHANDLER vmxHCExitXcptOrNmi;
229static FNVMXEXITHANDLER vmxHCExitExtInt;
230static FNVMXEXITHANDLER vmxHCExitTripleFault;
231static FNVMXEXITHANDLERNSRC vmxHCExitIntWindow;
232static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindow;
233static FNVMXEXITHANDLER vmxHCExitTaskSwitch;
234static FNVMXEXITHANDLER vmxHCExitCpuid;
235static FNVMXEXITHANDLER vmxHCExitGetsec;
236static FNVMXEXITHANDLER vmxHCExitHlt;
237static FNVMXEXITHANDLERNSRC vmxHCExitInvd;
238static FNVMXEXITHANDLER vmxHCExitInvlpg;
239static FNVMXEXITHANDLER vmxHCExitRdpmc;
240static FNVMXEXITHANDLER vmxHCExitVmcall;
241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
242static FNVMXEXITHANDLER vmxHCExitVmclear;
243static FNVMXEXITHANDLER vmxHCExitVmlaunch;
244static FNVMXEXITHANDLER vmxHCExitVmptrld;
245static FNVMXEXITHANDLER vmxHCExitVmptrst;
246static FNVMXEXITHANDLER vmxHCExitVmread;
247static FNVMXEXITHANDLER vmxHCExitVmresume;
248static FNVMXEXITHANDLER vmxHCExitVmwrite;
249static FNVMXEXITHANDLER vmxHCExitVmxoff;
250static FNVMXEXITHANDLER vmxHCExitVmxon;
251static FNVMXEXITHANDLER vmxHCExitInvvpid;
252#endif
253static FNVMXEXITHANDLER vmxHCExitRdtsc;
254static FNVMXEXITHANDLER vmxHCExitMovCRx;
255static FNVMXEXITHANDLER vmxHCExitMovDRx;
256static FNVMXEXITHANDLER vmxHCExitIoInstr;
257static FNVMXEXITHANDLER vmxHCExitRdmsr;
258static FNVMXEXITHANDLER vmxHCExitWrmsr;
259static FNVMXEXITHANDLER vmxHCExitMwait;
260static FNVMXEXITHANDLER vmxHCExitMtf;
261static FNVMXEXITHANDLER vmxHCExitMonitor;
262static FNVMXEXITHANDLER vmxHCExitPause;
263static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThreshold;
264static FNVMXEXITHANDLER vmxHCExitApicAccess;
265static FNVMXEXITHANDLER vmxHCExitEptViolation;
266static FNVMXEXITHANDLER vmxHCExitEptMisconfig;
267static FNVMXEXITHANDLER vmxHCExitRdtscp;
268static FNVMXEXITHANDLER vmxHCExitPreemptTimer;
269static FNVMXEXITHANDLERNSRC vmxHCExitWbinvd;
270static FNVMXEXITHANDLER vmxHCExitXsetbv;
271static FNVMXEXITHANDLER vmxHCExitInvpcid;
272static FNVMXEXITHANDLERNSRC vmxHCExitSetPendingXcptUD;
273static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestState;
274static FNVMXEXITHANDLERNSRC vmxHCExitErrUnexpected;
275/** @} */
276
277#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
278/** @name Nested-guest VM-exit handler prototypes.
279 * @{
280 */
281static FNVMXEXITHANDLER vmxHCExitXcptOrNmiNested;
282static FNVMXEXITHANDLER vmxHCExitTripleFaultNested;
283static FNVMXEXITHANDLERNSRC vmxHCExitIntWindowNested;
284static FNVMXEXITHANDLERNSRC vmxHCExitNmiWindowNested;
285static FNVMXEXITHANDLER vmxHCExitTaskSwitchNested;
286static FNVMXEXITHANDLER vmxHCExitHltNested;
287static FNVMXEXITHANDLER vmxHCExitInvlpgNested;
288static FNVMXEXITHANDLER vmxHCExitRdpmcNested;
289static FNVMXEXITHANDLER vmxHCExitVmreadVmwriteNested;
290static FNVMXEXITHANDLER vmxHCExitRdtscNested;
291static FNVMXEXITHANDLER vmxHCExitMovCRxNested;
292static FNVMXEXITHANDLER vmxHCExitMovDRxNested;
293static FNVMXEXITHANDLER vmxHCExitIoInstrNested;
294static FNVMXEXITHANDLER vmxHCExitRdmsrNested;
295static FNVMXEXITHANDLER vmxHCExitWrmsrNested;
296static FNVMXEXITHANDLER vmxHCExitMwaitNested;
297static FNVMXEXITHANDLER vmxHCExitMtfNested;
298static FNVMXEXITHANDLER vmxHCExitMonitorNested;
299static FNVMXEXITHANDLER vmxHCExitPauseNested;
300static FNVMXEXITHANDLERNSRC vmxHCExitTprBelowThresholdNested;
301static FNVMXEXITHANDLER vmxHCExitApicAccessNested;
302static FNVMXEXITHANDLER vmxHCExitApicWriteNested;
303static FNVMXEXITHANDLER vmxHCExitVirtEoiNested;
304static FNVMXEXITHANDLER vmxHCExitRdtscpNested;
305static FNVMXEXITHANDLERNSRC vmxHCExitWbinvdNested;
306static FNVMXEXITHANDLER vmxHCExitInvpcidNested;
307static FNVMXEXITHANDLERNSRC vmxHCExitErrInvalidGuestStateNested;
308static FNVMXEXITHANDLER vmxHCExitInstrNested;
309static FNVMXEXITHANDLER vmxHCExitInstrWithInfoNested;
310/** @} */
311#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
312
313
314/*********************************************************************************************************************************
315* Global Variables *
316*********************************************************************************************************************************/
317#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
318/**
319 * Array of all VMCS fields.
320 * Any fields added to the VT-x spec. should be added here.
321 *
322 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
323 * of nested-guests.
324 */
325static const uint32_t g_aVmcsFields[] =
326{
327 /* 16-bit control fields. */
328 VMX_VMCS16_VPID,
329 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
330 VMX_VMCS16_EPTP_INDEX,
331
332 /* 16-bit guest-state fields. */
333 VMX_VMCS16_GUEST_ES_SEL,
334 VMX_VMCS16_GUEST_CS_SEL,
335 VMX_VMCS16_GUEST_SS_SEL,
336 VMX_VMCS16_GUEST_DS_SEL,
337 VMX_VMCS16_GUEST_FS_SEL,
338 VMX_VMCS16_GUEST_GS_SEL,
339 VMX_VMCS16_GUEST_LDTR_SEL,
340 VMX_VMCS16_GUEST_TR_SEL,
341 VMX_VMCS16_GUEST_INTR_STATUS,
342 VMX_VMCS16_GUEST_PML_INDEX,
343
344 /* 16-bit host-state fields. */
345 VMX_VMCS16_HOST_ES_SEL,
346 VMX_VMCS16_HOST_CS_SEL,
347 VMX_VMCS16_HOST_SS_SEL,
348 VMX_VMCS16_HOST_DS_SEL,
349 VMX_VMCS16_HOST_FS_SEL,
350 VMX_VMCS16_HOST_GS_SEL,
351 VMX_VMCS16_HOST_TR_SEL,
352
353 /* 64-bit control fields. */
354 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
355 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
356 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
357 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
358 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
359 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
360 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
361 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
362 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
363 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
364 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
365 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
366 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
367 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
368 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
369 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
370 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
371 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
372 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
373 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
374 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
375 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
376 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
377 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
378 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
379 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
380 VMX_VMCS64_CTRL_EPTP_FULL,
381 VMX_VMCS64_CTRL_EPTP_HIGH,
382 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
383 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
384 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
385 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
386 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
387 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
388 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
389 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
390 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
391 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
392 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
393 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
394 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
395 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
396 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
397 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
398 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
399 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
400 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
401 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
402 VMX_VMCS64_CTRL_SPPTP_FULL,
403 VMX_VMCS64_CTRL_SPPTP_HIGH,
404 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
405 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
406 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
407 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
408 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
409 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
410
411 /* 64-bit read-only data fields. */
412 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
413 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
414
415 /* 64-bit guest-state fields. */
416 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
417 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
418 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
419 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
420 VMX_VMCS64_GUEST_PAT_FULL,
421 VMX_VMCS64_GUEST_PAT_HIGH,
422 VMX_VMCS64_GUEST_EFER_FULL,
423 VMX_VMCS64_GUEST_EFER_HIGH,
424 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
425 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
426 VMX_VMCS64_GUEST_PDPTE0_FULL,
427 VMX_VMCS64_GUEST_PDPTE0_HIGH,
428 VMX_VMCS64_GUEST_PDPTE1_FULL,
429 VMX_VMCS64_GUEST_PDPTE1_HIGH,
430 VMX_VMCS64_GUEST_PDPTE2_FULL,
431 VMX_VMCS64_GUEST_PDPTE2_HIGH,
432 VMX_VMCS64_GUEST_PDPTE3_FULL,
433 VMX_VMCS64_GUEST_PDPTE3_HIGH,
434 VMX_VMCS64_GUEST_BNDCFGS_FULL,
435 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
436 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
437 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
438 VMX_VMCS64_GUEST_PKRS_FULL,
439 VMX_VMCS64_GUEST_PKRS_HIGH,
440
441 /* 64-bit host-state fields. */
442 VMX_VMCS64_HOST_PAT_FULL,
443 VMX_VMCS64_HOST_PAT_HIGH,
444 VMX_VMCS64_HOST_EFER_FULL,
445 VMX_VMCS64_HOST_EFER_HIGH,
446 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
447 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
448 VMX_VMCS64_HOST_PKRS_FULL,
449 VMX_VMCS64_HOST_PKRS_HIGH,
450
451 /* 32-bit control fields. */
452 VMX_VMCS32_CTRL_PIN_EXEC,
453 VMX_VMCS32_CTRL_PROC_EXEC,
454 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
455 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
456 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
457 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
458 VMX_VMCS32_CTRL_EXIT,
459 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
460 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
461 VMX_VMCS32_CTRL_ENTRY,
462 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
463 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
464 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
465 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
466 VMX_VMCS32_CTRL_TPR_THRESHOLD,
467 VMX_VMCS32_CTRL_PROC_EXEC2,
468 VMX_VMCS32_CTRL_PLE_GAP,
469 VMX_VMCS32_CTRL_PLE_WINDOW,
470
471 /* 32-bit read-only fields. */
472 VMX_VMCS32_RO_VM_INSTR_ERROR,
473 VMX_VMCS32_RO_EXIT_REASON,
474 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
475 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
476 VMX_VMCS32_RO_IDT_VECTORING_INFO,
477 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
478 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
479 VMX_VMCS32_RO_EXIT_INSTR_INFO,
480
481 /* 32-bit guest-state fields. */
482 VMX_VMCS32_GUEST_ES_LIMIT,
483 VMX_VMCS32_GUEST_CS_LIMIT,
484 VMX_VMCS32_GUEST_SS_LIMIT,
485 VMX_VMCS32_GUEST_DS_LIMIT,
486 VMX_VMCS32_GUEST_FS_LIMIT,
487 VMX_VMCS32_GUEST_GS_LIMIT,
488 VMX_VMCS32_GUEST_LDTR_LIMIT,
489 VMX_VMCS32_GUEST_TR_LIMIT,
490 VMX_VMCS32_GUEST_GDTR_LIMIT,
491 VMX_VMCS32_GUEST_IDTR_LIMIT,
492 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
493 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
494 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
495 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
496 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
497 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
498 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
499 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
500 VMX_VMCS32_GUEST_INT_STATE,
501 VMX_VMCS32_GUEST_ACTIVITY_STATE,
502 VMX_VMCS32_GUEST_SMBASE,
503 VMX_VMCS32_GUEST_SYSENTER_CS,
504 VMX_VMCS32_PREEMPT_TIMER_VALUE,
505
506 /* 32-bit host-state fields. */
507 VMX_VMCS32_HOST_SYSENTER_CS,
508
509 /* Natural-width control fields. */
510 VMX_VMCS_CTRL_CR0_MASK,
511 VMX_VMCS_CTRL_CR4_MASK,
512 VMX_VMCS_CTRL_CR0_READ_SHADOW,
513 VMX_VMCS_CTRL_CR4_READ_SHADOW,
514 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
515 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
516 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
517 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
518
519 /* Natural-width read-only data fields. */
520 VMX_VMCS_RO_EXIT_QUALIFICATION,
521 VMX_VMCS_RO_IO_RCX,
522 VMX_VMCS_RO_IO_RSI,
523 VMX_VMCS_RO_IO_RDI,
524 VMX_VMCS_RO_IO_RIP,
525 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
526
527 /* Natural-width guest-state fields. */
528 VMX_VMCS_GUEST_CR0,
529 VMX_VMCS_GUEST_CR3,
530 VMX_VMCS_GUEST_CR4,
531 VMX_VMCS_GUEST_ES_BASE,
532 VMX_VMCS_GUEST_CS_BASE,
533 VMX_VMCS_GUEST_SS_BASE,
534 VMX_VMCS_GUEST_DS_BASE,
535 VMX_VMCS_GUEST_FS_BASE,
536 VMX_VMCS_GUEST_GS_BASE,
537 VMX_VMCS_GUEST_LDTR_BASE,
538 VMX_VMCS_GUEST_TR_BASE,
539 VMX_VMCS_GUEST_GDTR_BASE,
540 VMX_VMCS_GUEST_IDTR_BASE,
541 VMX_VMCS_GUEST_DR7,
542 VMX_VMCS_GUEST_RSP,
543 VMX_VMCS_GUEST_RIP,
544 VMX_VMCS_GUEST_RFLAGS,
545 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
546 VMX_VMCS_GUEST_SYSENTER_ESP,
547 VMX_VMCS_GUEST_SYSENTER_EIP,
548 VMX_VMCS_GUEST_S_CET,
549 VMX_VMCS_GUEST_SSP,
550 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
551
552 /* Natural-width host-state fields */
553 VMX_VMCS_HOST_CR0,
554 VMX_VMCS_HOST_CR3,
555 VMX_VMCS_HOST_CR4,
556 VMX_VMCS_HOST_FS_BASE,
557 VMX_VMCS_HOST_GS_BASE,
558 VMX_VMCS_HOST_TR_BASE,
559 VMX_VMCS_HOST_GDTR_BASE,
560 VMX_VMCS_HOST_IDTR_BASE,
561 VMX_VMCS_HOST_SYSENTER_ESP,
562 VMX_VMCS_HOST_SYSENTER_EIP,
563 VMX_VMCS_HOST_RSP,
564 VMX_VMCS_HOST_RIP,
565 VMX_VMCS_HOST_S_CET,
566 VMX_VMCS_HOST_SSP,
567 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
568};
569#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
570
571#ifdef VBOX_STRICT
572static const uint32_t g_aVmcsSegBase[] =
573{
574 VMX_VMCS_GUEST_ES_BASE,
575 VMX_VMCS_GUEST_CS_BASE,
576 VMX_VMCS_GUEST_SS_BASE,
577 VMX_VMCS_GUEST_DS_BASE,
578 VMX_VMCS_GUEST_FS_BASE,
579 VMX_VMCS_GUEST_GS_BASE
580};
581static const uint32_t g_aVmcsSegSel[] =
582{
583 VMX_VMCS16_GUEST_ES_SEL,
584 VMX_VMCS16_GUEST_CS_SEL,
585 VMX_VMCS16_GUEST_SS_SEL,
586 VMX_VMCS16_GUEST_DS_SEL,
587 VMX_VMCS16_GUEST_FS_SEL,
588 VMX_VMCS16_GUEST_GS_SEL
589};
590static const uint32_t g_aVmcsSegLimit[] =
591{
592 VMX_VMCS32_GUEST_ES_LIMIT,
593 VMX_VMCS32_GUEST_CS_LIMIT,
594 VMX_VMCS32_GUEST_SS_LIMIT,
595 VMX_VMCS32_GUEST_DS_LIMIT,
596 VMX_VMCS32_GUEST_FS_LIMIT,
597 VMX_VMCS32_GUEST_GS_LIMIT
598};
599static const uint32_t g_aVmcsSegAttr[] =
600{
601 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
602 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
603 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
604 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
605 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
606 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
607};
608AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
609AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
610AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
611AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
612#endif /* VBOX_STRICT */
613
614#ifdef HMVMX_USE_FUNCTION_TABLE
615/**
616 * VMX_EXIT dispatch table.
617 */
618static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
619{
620 /* 0 VMX_EXIT_XCPT_OR_NMI */ { vmxHCExitXcptOrNmi },
621 /* 1 VMX_EXIT_EXT_INT */ { vmxHCExitExtInt },
622 /* 2 VMX_EXIT_TRIPLE_FAULT */ { vmxHCExitTripleFault },
623 /* 3 VMX_EXIT_INIT_SIGNAL */ { vmxHCExitErrUnexpected },
624 /* 4 VMX_EXIT_SIPI */ { vmxHCExitErrUnexpected },
625 /* 5 VMX_EXIT_IO_SMI */ { vmxHCExitErrUnexpected },
626 /* 6 VMX_EXIT_SMI */ { vmxHCExitErrUnexpected },
627 /* 7 VMX_EXIT_INT_WINDOW */ { vmxHCExitIntWindow },
628 /* 8 VMX_EXIT_NMI_WINDOW */ { vmxHCExitNmiWindow },
629 /* 9 VMX_EXIT_TASK_SWITCH */ { vmxHCExitTaskSwitch },
630 /* 10 VMX_EXIT_CPUID */ { vmxHCExitCpuid },
631 /* 11 VMX_EXIT_GETSEC */ { vmxHCExitGetsec },
632 /* 12 VMX_EXIT_HLT */ { vmxHCExitHlt },
633 /* 13 VMX_EXIT_INVD */ { vmxHCExitInvd },
634 /* 14 VMX_EXIT_INVLPG */ { vmxHCExitInvlpg },
635 /* 15 VMX_EXIT_RDPMC */ { vmxHCExitRdpmc },
636 /* 16 VMX_EXIT_RDTSC */ { vmxHCExitRdtsc },
637 /* 17 VMX_EXIT_RSM */ { vmxHCExitErrUnexpected },
638 /* 18 VMX_EXIT_VMCALL */ { vmxHCExitVmcall },
639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
640 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitVmclear },
641 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitVmlaunch },
642 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitVmptrld },
643 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitVmptrst },
644 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitVmread },
645 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitVmresume },
646 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitVmwrite },
647 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitVmxoff },
648 /* 27 VMX_EXIT_VMXON */ { vmxHCExitVmxon },
649#else
650 /* 19 VMX_EXIT_VMCLEAR */ { vmxHCExitSetPendingXcptUD },
651 /* 20 VMX_EXIT_VMLAUNCH */ { vmxHCExitSetPendingXcptUD },
652 /* 21 VMX_EXIT_VMPTRLD */ { vmxHCExitSetPendingXcptUD },
653 /* 22 VMX_EXIT_VMPTRST */ { vmxHCExitSetPendingXcptUD },
654 /* 23 VMX_EXIT_VMREAD */ { vmxHCExitSetPendingXcptUD },
655 /* 24 VMX_EXIT_VMRESUME */ { vmxHCExitSetPendingXcptUD },
656 /* 25 VMX_EXIT_VMWRITE */ { vmxHCExitSetPendingXcptUD },
657 /* 26 VMX_EXIT_VMXOFF */ { vmxHCExitSetPendingXcptUD },
658 /* 27 VMX_EXIT_VMXON */ { vmxHCExitSetPendingXcptUD },
659#endif
660 /* 28 VMX_EXIT_MOV_CRX */ { vmxHCExitMovCRx },
661 /* 29 VMX_EXIT_MOV_DRX */ { vmxHCExitMovDRx },
662 /* 30 VMX_EXIT_IO_INSTR */ { vmxHCExitIoInstr },
663 /* 31 VMX_EXIT_RDMSR */ { vmxHCExitRdmsr },
664 /* 32 VMX_EXIT_WRMSR */ { vmxHCExitWrmsr },
665 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { vmxHCExitErrInvalidGuestState },
666 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { vmxHCExitErrUnexpected },
667 /* 35 UNDEFINED */ { vmxHCExitErrUnexpected },
668 /* 36 VMX_EXIT_MWAIT */ { vmxHCExitMwait },
669 /* 37 VMX_EXIT_MTF */ { vmxHCExitMtf },
670 /* 38 UNDEFINED */ { vmxHCExitErrUnexpected },
671 /* 39 VMX_EXIT_MONITOR */ { vmxHCExitMonitor },
672 /* 40 VMX_EXIT_PAUSE */ { vmxHCExitPause },
673 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { vmxHCExitErrUnexpected },
674 /* 42 UNDEFINED */ { vmxHCExitErrUnexpected },
675 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { vmxHCExitTprBelowThreshold },
676 /* 44 VMX_EXIT_APIC_ACCESS */ { vmxHCExitApicAccess },
677 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { vmxHCExitErrUnexpected },
678 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { vmxHCExitErrUnexpected },
679 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { vmxHCExitErrUnexpected },
680 /* 48 VMX_EXIT_EPT_VIOLATION */ { vmxHCExitEptViolation },
681 /* 49 VMX_EXIT_EPT_MISCONFIG */ { vmxHCExitEptMisconfig },
682 /* 50 VMX_EXIT_INVEPT */ { vmxHCExitSetPendingXcptUD },
683 /* 51 VMX_EXIT_RDTSCP */ { vmxHCExitRdtscp },
684 /* 52 VMX_EXIT_PREEMPT_TIMER */ { vmxHCExitPreemptTimer },
685#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
686 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitInvvpid },
687#else
688 /* 53 VMX_EXIT_INVVPID */ { vmxHCExitSetPendingXcptUD },
689#endif
690 /* 54 VMX_EXIT_WBINVD */ { vmxHCExitWbinvd },
691 /* 55 VMX_EXIT_XSETBV */ { vmxHCExitXsetbv },
692 /* 56 VMX_EXIT_APIC_WRITE */ { vmxHCExitErrUnexpected },
693 /* 57 VMX_EXIT_RDRAND */ { vmxHCExitErrUnexpected },
694 /* 58 VMX_EXIT_INVPCID */ { vmxHCExitInvpcid },
695 /* 59 VMX_EXIT_VMFUNC */ { vmxHCExitErrUnexpected },
696 /* 60 VMX_EXIT_ENCLS */ { vmxHCExitErrUnexpected },
697 /* 61 VMX_EXIT_RDSEED */ { vmxHCExitErrUnexpected },
698 /* 62 VMX_EXIT_PML_FULL */ { vmxHCExitErrUnexpected },
699 /* 63 VMX_EXIT_XSAVES */ { vmxHCExitErrUnexpected },
700 /* 64 VMX_EXIT_XRSTORS */ { vmxHCExitErrUnexpected },
701 /* 65 UNDEFINED */ { vmxHCExitErrUnexpected },
702 /* 66 VMX_EXIT_SPP_EVENT */ { vmxHCExitErrUnexpected },
703 /* 67 VMX_EXIT_UMWAIT */ { vmxHCExitErrUnexpected },
704 /* 68 VMX_EXIT_TPAUSE */ { vmxHCExitErrUnexpected },
705 /* 69 VMX_EXIT_LOADIWKEY */ { vmxHCExitErrUnexpected },
706};
707#endif /* HMVMX_USE_FUNCTION_TABLE */
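
/*
 * Illustrative sketch: how a dispatcher typically consumes the table above,
 * indexing by the basic exit reason with a bounds check.  This only shows the
 * core idea, not the actual dispatch code of this template.
 *
 *     uint32_t const uExitReason = VMX_EXIT_REASON_BASIC(pVmxTransient->uExitReason);
 *     if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))
 *         return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
 *     return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
 */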
708
709#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
710static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
711{
712 /* 0 */ "(Not Used)",
713 /* 1 */ "VMCALL executed in VMX root operation.",
714 /* 2 */ "VMCLEAR with invalid physical address.",
715 /* 3 */ "VMCLEAR with VMXON pointer.",
716 /* 4 */ "VMLAUNCH with non-clear VMCS.",
717 /* 5 */ "VMRESUME with non-launched VMCS.",
718 /* 6 */ "VMRESUME after VMXOFF",
719 /* 7 */ "VM-entry with invalid control fields.",
720 /* 8 */ "VM-entry with invalid host state fields.",
721 /* 9 */ "VMPTRLD with invalid physical address.",
722 /* 10 */ "VMPTRLD with VMXON pointer.",
723 /* 11 */ "VMPTRLD with incorrect revision identifier.",
724 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
725 /* 13 */ "VMWRITE to read-only VMCS component.",
726 /* 14 */ "(Not Used)",
727 /* 15 */ "VMXON executed in VMX root operation.",
728 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
729 /* 17 */ "VM-entry with non-launched executing VMCS.",
730 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
731 /* 19 */ "VMCALL with non-clear VMCS.",
732 /* 20 */ "VMCALL with invalid VM-exit control fields.",
733 /* 21 */ "(Not Used)",
734 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
735 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
736 /* 24 */ "VMCALL with invalid SMM-monitor features.",
737 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
738 /* 26 */ "VM-entry with events blocked by MOV SS.",
739 /* 27 */ "(Not Used)",
740 /* 28 */ "Invalid operand to INVEPT/INVVPID."
741};
742#endif /* VBOX_STRICT && LOG_ENABLED */
743
744
745#ifdef IN_RING0
746/**
747 * Checks if the given MSR is part of the lastbranch-from-IP MSR stack.
748 * @returns @c true if it's part of LBR stack, @c false otherwise.
749 *
750 * @param pVM The cross context VM structure.
751 * @param idMsr The MSR.
752 * @param pidxMsr Where to store the index of the MSR in the LBR MSR array.
753 * Optional, can be NULL.
754 *
755 * @remarks Must only be called when LBR is enabled.
756 */
757DECL_FORCE_INLINE(bool) vmxHCIsLbrBranchFromMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
758{
759 Assert(VM_IS_VMX_LBR(pVM));
760 Assert(pVM->hmr0.s.vmx.idLbrFromIpMsrFirst);
761 uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
762 uint32_t const idxMsr = idMsr - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
763 if (idxMsr < cLbrStack)
764 {
765 if (pidxMsr)
766 *pidxMsr = idxMsr;
767 return true;
768 }
769 return false;
770}
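
/*
 * Illustrative sketch: a WRMSR intercept path can use this helper to decide
 * whether the guest is writing an LBR from-IP MSR and, if so, where to stash
 * the value (the save-area array below is a hypothetical name, not a field
 * defined in this file).
 *
 *     uint32_t idxMsr;
 *     if (vmxHCIsLbrBranchFromMsr(pVM, idMsr, &idxMsr))
 *         au64LbrFromIpSaveArea[idxMsr] = uValue;    // hypothetical per-VCPU save area
 */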
771
772
773/**
774 * Checks if the given MSR is part of the lastbranch-to-IP MSR stack.
775 * @returns @c true if it's part of LBR stack, @c false otherwise.
776 *
777 * @param pVM The cross context VM structure.
778 * @param idMsr The MSR.
779 * @param pidxMsr Where to store the index of the MSR in the LBR MSR array.
780 * Optional, can be NULL.
781 *
782 * @remarks Must only be called when LBR is enabled and when lastbranch-to-IP MSRs
783 * are supported by the CPU (see vmxHCSetupLbrMsrRange).
784 */
785DECL_FORCE_INLINE(bool) vmxHCIsLbrBranchToMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
786{
787 Assert(VM_IS_VMX_LBR(pVM));
788 if (pVM->hmr0.s.vmx.idLbrToIpMsrFirst)
789 {
790 uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrToIpMsrLast - pVM->hmr0.s.vmx.idLbrToIpMsrFirst + 1;
791 uint32_t const idxMsr = idMsr - pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
792 if (idxMsr < cLbrStack)
793 {
794 if (pidxMsr)
795 *pidxMsr = idxMsr;
796 return true;
797 }
798 }
799 return false;
800}
801#endif
802
803
804/**
805 * Gets the CR0 guest/host mask.
806 *
807 * These bits typically do not change through the lifetime of a VM. Any bit set in
808 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
809 * by the guest.
810 *
811 * @returns The CR0 guest/host mask.
812 * @param pVCpu The cross context virtual CPU structure.
813 */
814static uint64_t vmxHCGetFixedCr0Mask(PCVMCPUCC pVCpu)
815{
816 /*
817 * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
818 * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
819 *
820 * Furthermore, modifications to any bits that are reserved/unspecified currently
821 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
822 * when future CPUs specify and use currently reserved/unspecified bits.
823 */
824 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
825 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
826 * and @bugref{6944}. */
827 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
828 return ( X86_CR0_PE
829 | X86_CR0_NE
830 | (VM_IS_VMX_NESTED_PAGING(pVM) ? 0 : X86_CR0_WP)
831 | X86_CR0_PG
832 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
833}
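
/*
 * Illustrative sketch: the mask computed above ends up in the CR0 guest/host
 * mask VMCS field when guest CR0 is exported, along the lines of:
 *
 *     uint64_t const fCr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
 *     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);
 *     AssertRC(rc);
 */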
834
835
836/**
837 * Gets the CR4 guest/host mask.
838 *
839 * These bits typically do not change through the lifetime of a VM. Any bit set in
840 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
841 * by the guest.
842 *
843 * @returns The CR4 guest/host mask.
844 * @param pVCpu The cross context virtual CPU structure.
845 */
846static uint64_t vmxHCGetFixedCr4Mask(PCVMCPUCC pVCpu)
847{
848 /*
849 * We construct a mask of all CR4 bits that the guest can modify without causing
850 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
851 * a VM-exit when the guest attempts to modify them when executing using
852 * hardware-assisted VMX.
853 *
854 * When a feature is not exposed to the guest (and may be present on the host),
855 * we want to intercept guest modifications to the bit so we can emulate proper
856 * behavior (e.g., #GP).
857 *
858 * Furthermore, only modifications to those bits that don't require immediate
859 * emulation are allowed. For example, PCIDE is excluded because its behavior
860 * depends on CR3, which might not always be the guest value while executing
861 * using hardware-assisted VMX.
862 */
863 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
864 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
865 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
866 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
867
868 /*
869 * Paranoia.
870 * Ensure features exposed to the guest are present on the host.
871 */
872 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
873 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
874 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
875
876 uint64_t const fGstMask = ( X86_CR4_PVI
877 | X86_CR4_TSD
878 | X86_CR4_DE
879 | X86_CR4_MCE
880 | X86_CR4_PCE
881 | X86_CR4_OSXMMEEXCPT
882 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
883 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
884 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
885 return ~fGstMask;
886}
887
888
889/**
890 * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
891 * area.
892 *
893 * @returns @c true if it's different, @c false otherwise.
894 * @param pVmcsInfo The VMCS info. object.
895 */
896DECL_FORCE_INLINE(bool) vmxHCIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
897{
898 return RT_BOOL( pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
899 && pVmcsInfo->pvGuestMsrStore);
900}
901
902
903/**
904 * Sets the given Processor-based VM-execution controls.
905 *
906 * @param pVCpu The cross context virtual CPU structure.
907 * @param pVmxTransient The VMX-transient structure.
908 * @param uProcCtls The Processor-based VM-execution controls to set.
909 */
910static void vmxHCSetProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
911{
912 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
913 if ((pVmcsInfo->u32ProcCtls & uProcCtls) != uProcCtls)
914 {
915 pVmcsInfo->u32ProcCtls |= uProcCtls;
916 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
917 AssertRC(rc);
918 }
919}
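
/*
 * Illustrative sketch: a typical caller, e.g. arming the monitor trap flag,
 * simply passes the control bit(s) it wants set and later removed (the MTF
 * bit name is from the VMX headers):
 *
 *     vmxHCSetProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
 *     // ... run the guest for one instruction ...
 *     vmxHCRemoveProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
 */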
920
921
922/**
923 * Removes the given Processor-based VM-execution controls.
924 *
925 * @param pVCpu The cross context virtual CPU structure.
926 * @param pVmxTransient The VMX-transient structure.
927 * @param uProcCtls The Processor-based VM-execution controls to remove.
928 *
929 * @remarks When executing a nested-guest, this will not remove any of the specified
930 * controls if the nested hypervisor has set any one of them.
931 */
932static void vmxHCRemoveProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
933{
934 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
935 if (pVmcsInfo->u32ProcCtls & uProcCtls)
936 {
937#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
938 if ( !pVmxTransient->fIsNestedGuest
939 || !CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uProcCtls))
940#else
941 NOREF(pVCpu);
942 if (!pVmxTransient->fIsNestedGuest)
943#endif
944 {
945 pVmcsInfo->u32ProcCtls &= ~uProcCtls;
946 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
947 AssertRC(rc);
948 }
949 }
950}
951
952
953/**
954 * Sets the TSC offset for the current VMCS.
955 *
956 * @param pVCpu The cross context virtual CPU structure.
957 * @param uTscOffset The TSC offset to set.
958 * @param pVmcsInfo The VMCS info. object.
959 */
960static void vmxHCSetTscOffsetVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t uTscOffset)
961{
962 if (pVmcsInfo->u64TscOffset != uTscOffset)
963 {
964 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
965 AssertRC(rc);
966 pVmcsInfo->u64TscOffset = uTscOffset;
967 }
968}
969
970
971/**
972 * Adds one or more exceptions to the exception bitmap and commits it to the current
973 * VMCS.
974 *
975 * @param pVCpu The cross context virtual CPU structure.
976 * @param pVmxTransient The VMX-transient structure.
977 * @param uXcptMask The exception(s) to add.
978 */
979static void vmxHCAddXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
980{
981 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
982 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
983 if ((uXcptBitmap & uXcptMask) != uXcptMask)
984 {
985 uXcptBitmap |= uXcptMask;
986 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
987 AssertRC(rc);
988 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
989 }
990}
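
/*
 * Illustrative sketch: the real-on-v86 code path can pull in the whole batch
 * of exceptions defined by HMVMX_REAL_MODE_XCPT_MASK above in one go:
 *
 *     vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, HMVMX_REAL_MODE_XCPT_MASK);
 */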
991
992
993/**
994 * Adds an exception to the exception bitmap and commits it to the current VMCS.
995 *
996 * @param pVCpu The cross context virtual CPU structure.
997 * @param pVmxTransient The VMX-transient structure.
998 * @param uXcpt The exception to add.
999 */
1000static void vmxHCAddXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
1001{
1002 Assert(uXcpt <= X86_XCPT_LAST);
1003 vmxHCAddXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT_32(uXcpt));
1004}
1005
1006
1007/**
1008 * Removes one or more exceptions from the exception bitmap and commits it to the
1009 * current VMCS.
1010 *
1011 * This takes care of not removing the exception intercept if a nested-guest
1012 * requires the exception to be intercepted.
1013 *
1014 * @returns VBox status code.
1015 * @param pVCpu The cross context virtual CPU structure.
1016 * @param pVmxTransient The VMX-transient structure.
1017 * @param uXcptMask The exception(s) to remove.
1018 */
1019static int vmxHCRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
1020{
1021 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1022 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
1023 if (u32XcptBitmap & uXcptMask)
1024 {
1025#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1026 if (!pVmxTransient->fIsNestedGuest)
1027 { /* likely */ }
1028 else
1029 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
1030#endif
1031#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
1032 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
1033 | RT_BIT(X86_XCPT_DE)
1034 | RT_BIT(X86_XCPT_NM)
1035 | RT_BIT(X86_XCPT_TS)
1036 | RT_BIT(X86_XCPT_UD)
1037 | RT_BIT(X86_XCPT_NP)
1038 | RT_BIT(X86_XCPT_SS)
1039 | RT_BIT(X86_XCPT_GP)
1040 | RT_BIT(X86_XCPT_PF)
1041 | RT_BIT(X86_XCPT_MF));
1042#elif defined(HMVMX_ALWAYS_TRAP_PF)
1043 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
1044#endif
1045 if (uXcptMask)
1046 {
1047 /* Validate we are not removing any essential exception intercepts. */
1048#ifdef IN_RING0
1049 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
1050#else
1051 Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
1052#endif
1053 NOREF(pVCpu);
1054 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
1055 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
1056
1057 /* Remove it from the exception bitmap. */
1058 u32XcptBitmap &= ~uXcptMask;
1059
1060 /* Commit and update the cache if necessary. */
1061 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
1062 {
1063 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1064 AssertRC(rc);
1065 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
1066 }
1067 }
1068 }
1069 return VINF_SUCCESS;
1070}
1071
1072
1073/**
1074 * Removes an exception from the exception bitmap and commits it to the current
1075 * VMCS.
1076 *
1077 * @returns VBox status code.
1078 * @param pVCpu The cross context virtual CPU structure.
1079 * @param pVmxTransient The VMX-transient structure.
1080 * @param uXcpt The exception to remove.
1081 */
1082static int vmxHCRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
1083{
1084 return vmxHCRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
1085}
1086
1087
1088#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1089/**
1090 * Loads the shadow VMCS specified by the VMCS info. object.
1091 *
1092 * @returns VBox status code.
1093 * @param pVmcsInfo The VMCS info. object.
1094 *
1095 * @remarks Can be called with interrupts disabled.
1096 */
1097static int vmxHCLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
1098{
1099 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1100 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1101
1102 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
1103 if (RT_SUCCESS(rc))
1104 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
1105 return rc;
1106}
1107
1108
1109/**
1110 * Clears the shadow VMCS specified by the VMCS info. object.
1111 *
1112 * @returns VBox status code.
1113 * @param pVmcsInfo The VMCS info. object.
1114 *
1115 * @remarks Can be called with interrupts disabled.
1116 */
1117static int vmxHCClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
1118{
1119 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1120 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1121
1122 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
1123 if (RT_SUCCESS(rc))
1124 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
1125 return rc;
1126}
1127
1128
1129/**
1130 * Switches from and to the specified VMCSes.
1131 *
1132 * @returns VBox status code.
1133 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1134 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1135 *
1136 * @remarks Called with interrupts disabled.
1137 */
1138static int vmxHCSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1139{
1140 /*
1141 * Clear the VMCS we are switching out if it has not already been cleared.
1142 * This will sync any CPU internal data back to the VMCS.
1143 */
1144 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1145 {
1146 int rc = vmxHCClearVmcs(pVmcsInfoFrom);
1147 if (RT_SUCCESS(rc))
1148 {
1149 /*
1150 * The shadow VMCS, if any, would not be active at this point since we
1151 * would have cleared it while importing the virtual hardware-virtualization
1152 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1153 * clear the shadow VMCS here, just assert for safety.
1154 */
1155 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1156 }
1157 else
1158 return rc;
1159 }
1160
1161 /*
1162 * Clear the VMCS we are switching to if it has not already been cleared.
1163 * This will initialize the VMCS launch state to "clear" required for loading it.
1164 *
1165 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1166 */
1167 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1168 {
1169 int rc = vmxHCClearVmcs(pVmcsInfoTo);
1170 if (RT_SUCCESS(rc))
1171 { /* likely */ }
1172 else
1173 return rc;
1174 }
1175
1176 /*
1177 * Finally, load the VMCS we are switching to.
1178 */
1179 return vmxHCLoadVmcs(pVmcsInfoTo);
1180}
1181
1182
1183/**
1184 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1185 * caller.
1186 *
1187 * @returns VBox status code.
1188 * @param pVCpu The cross context virtual CPU structure.
1189 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1190 * true) or guest VMCS (pass false).
1191 */
1192static int vmxHCSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1193{
1194 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1195 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1196
1197 PVMXVMCSINFO pVmcsInfoFrom;
1198 PVMXVMCSINFO pVmcsInfoTo;
1199 if (fSwitchToNstGstVmcs)
1200 {
1201 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1202 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1203 }
1204 else
1205 {
1206 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1207 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1208 }
1209
1210 /*
1211 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1212 * preemption hook code path acquires the current VMCS.
1213 */
1214 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1215
1216 int rc = vmxHCSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1217 if (RT_SUCCESS(rc))
1218 {
1219 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1220 VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1221
1222 /*
1223 * If we are switching to a VMCS that was executed on a different host CPU or was
1224 * never executed before, flag that we need to export the host state before executing
1225 * guest/nested-guest code using hardware-assisted VMX.
1226 *
1227 * This could probably be done in a preemptible context since the preemption hook
1228 * will flag the necessary change in host context. However, since preemption is
1229 * already disabled and to avoid making assumptions about host specific code in
1230 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1231 * disabled.
1232 */
1233 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1234 { /* likely */ }
1235 else
1236 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1237
1238 ASMSetFlags(fEFlags);
1239
1240 /*
1241 * We use different VM-exit MSR-store areas for the guest and the nested-guest. Hence,
1242 * flag that we need to update the host MSR values there. Even if we decide in the
1243 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1244 * if its content differs, we would have to update the host MSRs anyway.
1245 */
1246 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1247 }
1248 else
1249 ASMSetFlags(fEFlags);
1250 return rc;
1251}
1252#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
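
/*
 * Illustrative sketch: when VMLAUNCH/VMRESUME emulation decides to run the
 * nested-guest with hardware assistance, the switch is driven roughly like
 * this (and mirrored back to the guest VMCS on nested VM-exit):
 *
 *     if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
 *     {
 *         int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true); // true = switch to nested-guest VMCS
 *         AssertRCReturn(rc, rc);
 *     }
 */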
1253
1254
1255/**
1256 * Updates the VM's last error record.
1257 *
1258 * If there was a VMX instruction error, reads the error data from the VMCS and
1259 * updates the VCPU's last error record as well.
1260 *
1261 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1262 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
1263 * VERR_VMX_INVALID_VMCS_FIELD.
1264 * @param rc The error code.
1265 */
1266static void vmxHCUpdateErrorRecord(PVMCPUCC pVCpu, int rc)
1267{
1268 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
1269 || rc == VERR_VMX_UNABLE_TO_START_VM)
1270 {
1271 AssertPtrReturnVoid(pVCpu);
1272 VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32InstrError);
1273 }
1274#ifdef IN_RING0
1275 pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
1276#endif
1277}
1278
1279
1280#ifdef VBOX_STRICT
1281/**
1282 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1283 * transient structure.
1284 *
1285 * @param pVCpu The cross context virtual CPU structure.
1286 * @param pVmxTransient The VMX-transient structure.
1287 */
1288DECLINLINE(void) vmxHCReadEntryIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1289{
1290 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1291 AssertRC(rc);
1292}
1293
1294
1295/**
1296 * Reads the VM-entry exception error code field from the VMCS into
1297 * the VMX transient structure.
1298 *
1299 * @param pVCpu The cross context virtual CPU structure.
1300 * @param pVmxTransient The VMX-transient structure.
1301 */
1302DECLINLINE(void) vmxHCReadEntryXcptErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1303{
1304 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1305 AssertRC(rc);
1306}
1307
1308
1309/**
1310 * Reads the VM-entry instruction length field from the VMCS into
1311 * the VMX transient structure.
1312 *
1313 * @param pVCpu The cross context virtual CPU structure.
1314 * @param pVmxTransient The VMX-transient structure.
1315 */
1316DECLINLINE(void) vmxHCReadEntryInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1317{
1318 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1319 AssertRC(rc);
1320}
1321#endif /* VBOX_STRICT */
1322
1323
1324/**
1325 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1326 * transient structure.
1327 *
1328 * @param pVCpu The cross context virtual CPU structure.
1329 * @param pVmxTransient The VMX-transient structure.
1330 */
1331DECLINLINE(void) vmxHCReadExitIntInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1332{
1333 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1334 {
1335 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1336 AssertRC(rc);
1337 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1338 }
1339}
1340
1341
1342/**
1343 * Reads the VM-exit interruption error code from the VMCS into the VMX
1344 * transient structure.
1345 *
1346 * @param pVCpu The cross context virtual CPU structure.
1347 * @param pVmxTransient The VMX-transient structure.
1348 */
1349DECLINLINE(void) vmxHCReadExitIntErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1350{
1351 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1352 {
1353 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1354 AssertRC(rc);
1355 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1356 }
1357}
1358
1359
1360/**
1361 * Reads the VM-exit instruction length field from the VMCS into the VMX
1362 * transient structure.
1363 *
1364 * @param pVCpu The cross context virtual CPU structure.
1365 * @param pVmxTransient The VMX-transient structure.
1366 */
1367DECLINLINE(void) vmxHCReadExitInstrLenVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1368{
1369 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1370 {
1371 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1372 AssertRC(rc);
1373 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1374 }
1375}
1376
1377
1378/**
1379 * Reads the VM-exit instruction-information field from the VMCS into
1380 * the VMX transient structure.
1381 *
1382 * @param pVCpu The cross context virtual CPU structure.
1383 * @param pVmxTransient The VMX-transient structure.
1384 */
1385DECLINLINE(void) vmxHCReadExitInstrInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1386{
1387 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1388 {
1389 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1390 AssertRC(rc);
1391 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1392 }
1393}
1394
1395
1396/**
1397 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1398 *
1399 * @param pVCpu The cross context virtual CPU structure.
1400 * @param pVmxTransient The VMX-transient structure.
1401 */
1402DECLINLINE(void) vmxHCReadExitQualVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1403{
1404 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1405 {
1406 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1407 AssertRC(rc);
1408 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1409 }
1410}
1411
1412
1413/**
1414 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1415 *
1416 * @param pVCpu The cross context virtual CPU structure.
1417 * @param pVmxTransient The VMX-transient structure.
1418 */
1419DECLINLINE(void) vmxHCReadGuestLinearAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1420{
1421 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1422 {
1423 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1424 AssertRC(rc);
1425 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1426 }
1427}
1428
1429
1430/**
1431 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1432 *
1433 * @param pVCpu The cross context virtual CPU structure.
1434 * @param pVmxTransient The VMX-transient structure.
1435 */
1436DECLINLINE(void) vmxHCReadGuestPhysicalAddrVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1437{
1438 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1439 {
1440 int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1441 AssertRC(rc);
1442 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1443 }
1444}
1445
1446#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1447/**
1448 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1449 * structure.
1450 *
1451 * @param pVCpu The cross context virtual CPU structure.
1452 * @param pVmxTransient The VMX-transient structure.
1453 */
1454DECLINLINE(void) vmxHCReadGuestPendingDbgXctps(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1455{
1456 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1457 {
1458 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1459 AssertRC(rc);
1460 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1461 }
1462}
1463#endif
1464
1465/**
1466 * Reads the IDT-vectoring information field from the VMCS into the VMX
1467 * transient structure.
1468 *
1469 * @param pVCpu The cross context virtual CPU structure.
1470 * @param pVmxTransient The VMX-transient structure.
1471 *
1472 * @remarks No-long-jump zone!!!
1473 */
1474DECLINLINE(void) vmxHCReadIdtVectoringInfoVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1475{
1476 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1477 {
1478 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1479 AssertRC(rc);
1480 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1481 }
1482}
1483
1484
1485/**
1486 * Reads the IDT-vectoring error code from the VMCS into the VMX
1487 * transient structure.
1488 *
1489 * @param pVCpu The cross context virtual CPU structure.
1490 * @param pVmxTransient The VMX-transient structure.
1491 */
1492DECLINLINE(void) vmxHCReadIdtVectoringErrorCodeVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1493{
1494 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1495 {
1496 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1497 AssertRC(rc);
1498 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1499 }
1500}
1501
1502#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1503/**
1504 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1505 *
1506 * @param pVCpu The cross context virtual CPU structure.
1507 * @param pVmxTransient The VMX-transient structure.
1508 */
1509static void vmxHCReadAllRoFieldsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1510{
1511 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1512 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1513 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1514 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1515 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1516 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1517 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1518 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1519 rc |= VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1520 AssertRC(rc);
1521 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1522 | HMVMX_READ_EXIT_INSTR_LEN
1523 | HMVMX_READ_EXIT_INSTR_INFO
1524 | HMVMX_READ_IDT_VECTORING_INFO
1525 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1526 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1527 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1528 | HMVMX_READ_GUEST_LINEAR_ADDR
1529 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1530}
1531#endif
1532
1533#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1534/**
1535 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
1536 *
1537 * @returns @c true if the MSR is intercepted, @c false otherwise.
1538 * @param pbMsrBitmap The MSR bitmap.
1539 * @param offMsr The MSR byte offset.
1540 * @param iBit The bit offset from the byte offset.
1541 */
1542DECLINLINE(bool) vmxHCIsMsrBitSet(uint8_t const *pbMsrBitmap, uint16_t offMsr, int32_t iBit)
1543{
1544 Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
1545 return ASMBitTest(pbMsrBitmap + offMsr, iBit);
1546}
1547#endif
1548
1549/**
1550 * Sets the permission bits for the specified MSR in the given MSR bitmap.
1551 *
1552 * If the passed VMCS is a nested-guest VMCS, this function ensures that the
1553 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
1554 * VMX execution of the nested-guest, but only if the nested-guest is also not
1555 * intercepting the read/write access of this MSR.
1556 *
1557 * @param pVCpu The cross context virtual CPU structure.
1558 * @param pVmcsInfo The VMCS info. object.
1559 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1560 * @param idMsr The MSR value.
1561 * @param fMsrpm The MSR permissions (see VMXMSRPM_XXX). This must
1562 * include both a read -and- a write permission!
1563 *
1564 * @sa CPUMGetVmxMsrPermission.
1565 * @remarks Can be called with interrupts disabled.
1566 */
1567static void vmxHCSetMsrPermission(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
1568{
1569 uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
1570 Assert(pbMsrBitmap);
1571 Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
1572
1573 /*
1574 * MSR-bitmap Layout:
1575 * Byte index MSR range Interpreted as
1576 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
1577 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
1578 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
1579 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
1580 *
1581 * A bit corresponding to an MSR within the above ranges causes a VM-exit
1582 * if the bit is 1 on executions of RDMSR/WRMSR. An MSR that falls outside
1583 * these ranges always causes a VM-exit; see the worked example after this function.
1584 *
1585 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
1586 */
1587 uint16_t const offBitmapRead = 0;
1588 uint16_t const offBitmapWrite = 0x800;
1589 uint16_t offMsr;
1590 int32_t iBit;
1591 if (idMsr <= UINT32_C(0x00001fff))
1592 {
1593 offMsr = 0;
1594 iBit = idMsr;
1595 }
1596 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
1597 {
1598 offMsr = 0x400;
1599 iBit = idMsr - UINT32_C(0xc0000000);
1600 }
1601 else
1602 AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
1603
1604 /*
1605 * Set the MSR read permission.
1606 */
1607 uint16_t const offMsrRead = offBitmapRead + offMsr;
1608 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
1609 if (fMsrpm & VMXMSRPM_ALLOW_RD)
1610 {
1611#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1612 bool const fClear = !fIsNstGstVmcs ? true
1613 : !vmxHCIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrRead, iBit);
1614#else
1615 RT_NOREF2(pVCpu, fIsNstGstVmcs);
1616 bool const fClear = true;
1617#endif
1618 if (fClear)
1619 ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
1620 }
1621 else
1622 ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
1623
1624 /*
1625 * Set the MSR write permission.
1626 */
1627 uint16_t const offMsrWrite = offBitmapWrite + offMsr;
1628 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
1629 if (fMsrpm & VMXMSRPM_ALLOW_WR)
1630 {
1631#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1632 bool const fClear = !fIsNstGstVmcs ? true
1633 : !vmxHCIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrWrite, iBit);
1634#else
1635 RT_NOREF2(pVCpu, fIsNstGstVmcs);
1636 bool const fClear = true;
1637#endif
1638 if (fClear)
1639 ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
1640 }
1641 else
1642 ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
1643}
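
/*
 * Illustrative sketch (not part of this template, disabled with "#if 0"): how the
 * MSR-bitmap byte offset and bit index documented in vmxHCSetMsrPermission above
 * can be derived for a given MSR. The helper name and the sample MSR value are
 * made up for this example; the template itself does this inline as shown above.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>
# include <stdio.h>

/* Computes the byte offset (within the read or write half of the bitmap) and the
   bit index for an MSR, mirroring the layout table above. Returns false for MSRs
   outside both ranges (those always cause a VM-exit). */
static bool exampleMsrBitmapIndex(uint32_t idMsr, uint16_t *poffMsr, int32_t *piBit)
{
    if (idMsr <= UINT32_C(0x00001fff))                          /* Low MSR range. */
    {
        *poffMsr = 0x000;
        *piBit   = (int32_t)idMsr;
        return true;
    }
    if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))   /* High MSR range. */
    {
        *poffMsr = 0x400;
        *piBit   = (int32_t)(idMsr - UINT32_C(0xc0000000));
        return true;
    }
    return false;
}

int main(void)
{
    uint16_t offMsr = 0;
    int32_t  iBit   = 0;
    if (exampleMsrBitmapIndex(UINT32_C(0x00000174) /* IA32_SYSENTER_CS */, &offMsr, &iBit))
    {
        printf("read intercept bit:  byte %#05x, bit %d\n", (unsigned)(0x000 + offMsr + (iBit >> 3)), (int)(iBit & 7));
        printf("write intercept bit: byte %#05x, bit %d\n", (unsigned)(0x800 + offMsr + (iBit >> 3)), (int)(iBit & 7));
    }
    return 0;
}
#endif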
1644
1645
1646/**
1647 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1648 * area.
1649 *
1650 * @returns VBox status code.
1651 * @param pVCpu The cross context virtual CPU structure.
1652 * @param pVmcsInfo The VMCS info. object.
1653 * @param cMsrs The number of MSRs.
1654 */
1655static int vmxHCSetAutoLoadStoreMsrCount(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
1656{
1657 /* Shouldn't ever happen, but there -is- an upper limit. We're well within the recommended 512. */
1658 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc);
1659 if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
1660 {
1661 /* Commit the MSR counts to the VMCS and update the cache. */
1662 if (pVmcsInfo->cEntryMsrLoad != cMsrs)
1663 {
1664 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRC(rc);
1665 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRC(rc);
1666 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRC(rc);
1667 pVmcsInfo->cEntryMsrLoad = cMsrs;
1668 pVmcsInfo->cExitMsrStore = cMsrs;
1669 pVmcsInfo->cExitMsrLoad = cMsrs;
1670 }
1671 return VINF_SUCCESS;
1672 }
1673
1674 LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
1675 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1676 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1677}
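
/*
 * Illustrative sketch (not compiled, "#if 0"): how a limit such as the one checked
 * above is commonly derived from the IA32_VMX_MISC MSR. This assumes the SDM
 * formula where bits 27:25 hold a value N and the recommended maximum number of
 * auto-load/store MSRs is 512 * (N + 1); the function name and the sample MSR
 * value are made up for this example.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

static uint32_t exampleVmxMiscMaxMsrs(uint64_t uVmxMisc)
{
    uint32_t const cN = (uint32_t)((uVmxMisc >> 25) & UINT32_C(0x7));
    return 512 * (cN + 1);
}

int main(void)
{
    /* With N = 0 (the common case) the recommended maximum is 512 MSRs. */
    printf("recommended max auto-load/store MSRs: %u\n", exampleVmxMiscMaxMsrs(0));
    return 0;
}
#endif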
1678
1679
1680/**
1681 * Adds a new (or updates the value of an existing) guest/host MSR
1682 * pair to be swapped during the world-switch as part of the
1683 * auto-load/store MSR area in the VMCS.
1684 *
1685 * @returns VBox status code.
1686 * @param pVCpu The cross context virtual CPU structure.
1687 * @param pVmxTransient The VMX-transient structure.
1688 * @param idMsr The MSR.
1689 * @param uGuestMsrValue Value of the guest MSR.
1690 * @param fSetReadWrite Whether to set the guest read/write access of this
1691 * MSR (thus not causing a VM-exit).
1692 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1693 * necessary.
1694 */
1695static int vmxHCAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
1696 bool fSetReadWrite, bool fUpdateHostMsr)
1697{
1698 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1699 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
1700 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1701 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
1702 uint32_t i;
1703
1704 /* Paranoia. */
1705 Assert(pGuestMsrLoad);
1706
1707#ifndef DEBUG_bird
1708 LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
1709#endif
1710
1711 /* Check if the MSR already exists in the VM-entry MSR-load area. */
1712 for (i = 0; i < cMsrs; i++)
1713 {
1714 if (pGuestMsrLoad[i].u32Msr == idMsr)
1715 break;
1716 }
1717
1718 bool fAdded = false;
1719 if (i == cMsrs)
1720 {
1721 /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
1722 ++cMsrs;
1723 int rc = vmxHCSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
1724 AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
1725
1726 /* Set the guest to read/write this MSR without causing VM-exits. */
1727 if ( fSetReadWrite
1728 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
1729 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
1730
1731 Log4Func(("Added MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
1732 fAdded = true;
1733 }
1734
1735 /* Update the MSR value for the newly added or already existing MSR. */
1736 pGuestMsrLoad[i].u32Msr = idMsr;
1737 pGuestMsrLoad[i].u64Value = uGuestMsrValue;
1738
1739 /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
1740 if (vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
1741 {
1742 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
1743 pGuestMsrStore[i].u32Msr = idMsr;
1744 pGuestMsrStore[i].u64Value = uGuestMsrValue;
1745 }
1746
1747 /* Update the corresponding slot in the host MSR area. */
1748 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1749 Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
1750 Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
1751 pHostMsr[i].u32Msr = idMsr;
1752
1753#ifdef IN_RING0
1754 /*
1755 * Only if the caller requests to update the host MSR value AND we've newly added the
1756 * MSR to the host MSR area do we actually update the value. Otherwise, it will be
1757 * updated by vmxHCUpdateAutoLoadHostMsrs().
1758 *
1759 * We do this for performance reasons since reading MSRs may be quite expensive.
1760 */
1761 if (fAdded)
1762 {
1763 if (fUpdateHostMsr)
1764 {
1765 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1766 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1767 pHostMsr[i].u64Value = ASMRdMsr(idMsr);
1768 }
1769 else
1770 {
1771 /* Someone else can do the work. */
1772 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1773 }
1774 }
1775#else
1776 RT_NOREF(fUpdateHostMsr);
1777#endif
1778 return VINF_SUCCESS;
1779}
1780
1781
1782/**
1783 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1784 * auto-load/store MSR area in the VMCS.
1785 *
1786 * @returns VBox status code.
1787 * @param pVCpu The cross context virtual CPU structure.
1788 * @param pVmxTransient The VMX-transient structure.
1789 * @param idMsr The MSR.
1790 */
1791static int vmxHCRemoveAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr)
1792{
1793 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1794 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
1795 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1796 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
1797
1798#ifndef DEBUG_bird
1799 LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
1800#endif
1801
1802 for (uint32_t i = 0; i < cMsrs; i++)
1803 {
1804 /* Find the MSR. */
1805 if (pGuestMsrLoad[i].u32Msr == idMsr)
1806 {
1807 /*
1808 * If it's the last MSR, we only need to reduce the MSR count.
1809 * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
1810 */
1811 if (i < cMsrs - 1)
1812 {
1813 /* Remove it from the VM-entry MSR-load area. */
1814 pGuestMsrLoad[i].u32Msr = pGuestMsrLoad[cMsrs - 1].u32Msr;
1815 pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
1816
1817 /* Remove it from the VM-exit MSR-store area if it's in a different page. */
1818 if (vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
1819 {
1820 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
1821 Assert(pGuestMsrStore[i].u32Msr == idMsr);
1822 pGuestMsrStore[i].u32Msr = pGuestMsrStore[cMsrs - 1].u32Msr;
1823 pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
1824 }
1825
1826 /* Remove it from the VM-exit MSR-load area. */
1827 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1828 Assert(pHostMsr[i].u32Msr == idMsr);
1829 pHostMsr[i].u32Msr = pHostMsr[cMsrs - 1].u32Msr;
1830 pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
1831 }
1832
1833 /* Reduce the count to reflect the removed MSR and bail. */
1834 --cMsrs;
1835 break;
1836 }
1837 }
1838
1839 /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
1840 if (cMsrs != pVmcsInfo->cEntryMsrLoad)
1841 {
1842 int rc = vmxHCSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
1843 AssertRCReturn(rc, rc);
1844
1845 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1846 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1847 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
1848
1849 Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
1850 return VINF_SUCCESS;
1851 }
1852
1853 return VERR_NOT_FOUND;
1854}
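
/*
 * Illustrative sketch (not compiled, "#if 0"): the "copy the last entry into the
 * hole and shrink the count" removal used above, shown on a plain array. The type
 * and function names are made up for this example; ordering is not preserved,
 * which is fine because the MSR areas are unordered.
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

typedef struct EXAMPLEMSR { uint32_t u32Msr; uint64_t u64Value; } EXAMPLEMSR;

static bool exampleRemoveMsr(EXAMPLEMSR *paMsrs, uint32_t *pcMsrs, uint32_t idMsr)
{
    for (uint32_t i = 0; i < *pcMsrs; i++)
    {
        if (paMsrs[i].u32Msr == idMsr)
        {
            if (i < *pcMsrs - 1)        /* Not the last entry: move the last one into the hole. */
                paMsrs[i] = paMsrs[*pcMsrs - 1];
            --*pcMsrs;                  /* Shrink the count; the vacated slot becomes unused. */
            return true;
        }
    }
    return false;                       /* Not found. */
}
#endif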
1855
1856
1857/**
1858 * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
1859 *
1860 * @returns @c true if found, @c false otherwise.
1861 * @param pVmcsInfo The VMCS info. object.
1862 * @param idMsr The MSR to find.
1863 */
1864static bool vmxHCIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
1865{
1866 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1867 uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
1868 Assert(pMsrs);
1869 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
1870 for (uint32_t i = 0; i < cMsrs; i++)
1871 {
1872 if (pMsrs[i].u32Msr == idMsr)
1873 return true;
1874 }
1875 return false;
1876}
1877
1878
1879/**
1880 * Verifies that our cached values of the VMCS fields are all consistent with
1881 * what's actually present in the VMCS.
1882 *
1883 * @returns VBox status code.
1884 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1885 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1886 * VMCS content. HMCPU error-field is
1887 * updated, see VMX_VCI_XXX.
1888 * @param pVCpu The cross context virtual CPU structure.
1889 * @param pVmcsInfo The VMCS info. object.
1890 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1891 */
1892static int vmxHCCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1893{
1894 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
1895
1896 uint32_t u32Val;
1897 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
1898 AssertRC(rc);
1899 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
1900 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
1901 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
1902 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1903
1904 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT, &u32Val);
1905 AssertRC(rc);
1906 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
1907 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
1908 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
1909 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1910
1911 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1912 AssertRC(rc);
1913 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
1914 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
1915 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1916 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1917
1918 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1919 AssertRC(rc);
1920 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
1921 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
1922 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1923 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1924
1925 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1926 {
1927 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1928 AssertRC(rc);
1929 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
1930 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
1931 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1932 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1933 }
1934
1935 uint64_t u64Val;
1936 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1937 {
1938 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
1939 AssertRC(rc);
1940 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
1941 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
1942 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
1943 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1944 }
1945
1946 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1947 AssertRC(rc);
1948 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
1949 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
1950 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1951 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1952
1953 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1954 AssertRC(rc);
1955 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
1956 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
1957 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1958 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1959
1960 NOREF(pcszVmcs);
1961 return VINF_SUCCESS;
1962}
1963
1964
1965#ifdef IN_RING0
1966/**
1967 * Sets up the LBR MSR ranges based on the host CPU.
1968 *
1969 * @returns VBox status code.
1970 * @param pVM The cross context VM structure.
1971 */
1972static int vmxHCSetupLbrMsrRange(PVMCC pVM)
1973{
1974 Assert(VM_IS_VMX_LBR(pVM));
1975 uint32_t idLbrFromIpMsrFirst;
1976 uint32_t idLbrFromIpMsrLast;
1977 uint32_t idLbrToIpMsrFirst;
1978 uint32_t idLbrToIpMsrLast;
1979 uint32_t idLbrTosMsr;
1980
1981 /*
1982 * Determine the LBR MSRs supported for this host CPU family and model.
1983 *
1984 * See Intel spec. 17.4.8 "LBR Stack".
1985 * See Intel "Model-Specific Registers" spec.
1986 */
1987 uint32_t const uFamilyModel = (pVM->cpum.ro.HostFeatures.uFamily << 8)
1988 | pVM->cpum.ro.HostFeatures.uModel;
1989 switch (uFamilyModel)
1990 {
1991 case 0x0f01: case 0x0f02:
1992 idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
1993 idLbrFromIpMsrLast = MSR_P4_LASTBRANCH_3;
1994 idLbrToIpMsrFirst = 0x0;
1995 idLbrToIpMsrLast = 0x0;
1996 idLbrTosMsr = MSR_P4_LASTBRANCH_TOS;
1997 break;
1998
1999 case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
2000 case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
2001 case 0x066a: case 0x066c: case 0x067d: case 0x067e:
2002 idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
2003 idLbrFromIpMsrLast = MSR_LASTBRANCH_31_FROM_IP;
2004 idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
2005 idLbrToIpMsrLast = MSR_LASTBRANCH_31_TO_IP;
2006 idLbrTosMsr = MSR_LASTBRANCH_TOS;
2007 break;
2008
2009 case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
2010 case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
2011 case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
2012 case 0x062e: case 0x0625: case 0x062c: case 0x062f:
2013 idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
2014 idLbrFromIpMsrLast = MSR_LASTBRANCH_15_FROM_IP;
2015 idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
2016 idLbrToIpMsrLast = MSR_LASTBRANCH_15_TO_IP;
2017 idLbrTosMsr = MSR_LASTBRANCH_TOS;
2018 break;
2019
2020 case 0x0617: case 0x061d: case 0x060f:
2021 idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
2022 idLbrFromIpMsrLast = MSR_CORE2_LASTBRANCH_3_FROM_IP;
2023 idLbrToIpMsrFirst = MSR_CORE2_LASTBRANCH_0_TO_IP;
2024 idLbrToIpMsrLast = MSR_CORE2_LASTBRANCH_3_TO_IP;
2025 idLbrTosMsr = MSR_CORE2_LASTBRANCH_TOS;
2026 break;
2027
2028 /* Atom and related microarchitectures we don't care about:
2029 case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
2030 case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
2031 case 0x0636: */
2032 /* All other CPUs: */
2033 default:
2034 {
2035 LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
2036 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
2037 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2038 }
2039 }
2040
2041 /*
2042 * Validate.
2043 */
2044 uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
2045 PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
2046 AssertCompile( RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr)
2047 == RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrToIpMsr));
2048 if (cLbrStack > RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr))
2049 {
2050 LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
2051 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
2052 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2053 }
2054 NOREF(pVCpu0);
2055
2056 /*
2057 * Update the LBR info. to the VM struct. for use later.
2058 */
2059 pVM->hmr0.s.vmx.idLbrTosMsr = idLbrTosMsr;
2060
2061 pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
2062 pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast = pVM->hmr0.s.vmx.idLbrFromIpMsrLast = idLbrFromIpMsrLast;
2063
2064 pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst = pVM->hmr0.s.vmx.idLbrToIpMsrFirst = idLbrToIpMsrFirst;
2065 pVM->hm.s.ForR3.vmx.idLbrToIpMsrLast = pVM->hmr0.s.vmx.idLbrToIpMsrLast = idLbrToIpMsrLast;
2066 return VINF_SUCCESS;
2067}
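
/*
 * Illustrative sketch (not compiled, "#if 0"): the (family << 8) | model key used
 * by the switch above, evaluated for an example CPU with family 0x6 and model 0x3d
 * (a Broadwell-class part), which lands in the 16-entry LBR stack case.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const uFamily      = 0x06;
    uint32_t const uModel       = 0x3d;
    uint32_t const uFamilyModel = (uFamily << 8) | uModel;
    printf("uFamilyModel = %#06x\n", uFamilyModel);     /* 0x063d */
    return 0;
}
#endif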
2068
2069
2070#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2071/**
2072 * Sets up the shadow VMCS fields arrays.
2073 *
2074 * This function builds arrays of VMCS fields to sync the shadow VMCS later while
2075 * executing the guest.
2076 *
2077 * @returns VBox status code.
2078 * @param pVM The cross context VM structure.
2079 */
2080static int vmxHCSetupShadowVmcsFieldsArrays(PVMCC pVM)
2081{
2082 /*
2083 * Paranoia. Ensure we haven't exposed the VMWRITE-All VMX feature to the guest
2084 * when the host does not support it.
2085 */
2086 bool const fGstVmwriteAll = pVM->cpum.ro.GuestFeatures.fVmxVmwriteAll;
2087 if ( !fGstVmwriteAll
2088 || (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL))
2089 { /* likely. */ }
2090 else
2091 {
2092 LogRelFunc(("VMX VMWRITE-All feature exposed to the guest but host CPU does not support it!\n"));
2093 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_GST_HOST_VMWRITE_ALL;
2094 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2095 }
2096
2097 uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
2098 uint32_t cRwFields = 0;
2099 uint32_t cRoFields = 0;
2100 for (uint32_t i = 0; i < cVmcsFields; i++)
2101 {
2102 VMXVMCSFIELD VmcsField;
2103 VmcsField.u = g_aVmcsFields[i];
2104
2105 /*
2106 * We will be writing "FULL" (64-bit) fields while syncing the shadow VMCS.
2107 * Therefore, "HIGH" (32-bit portion of 64-bit) fields must not be included
2108 * in the shadow VMCS fields array as they would be redundant.
2109 *
2110 * If the VMCS field depends on a CPU feature that is not exposed to the guest,
2111 * we must not include it in the shadow VMCS fields array. Guests attempting to
2112 * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
2113 * the required behavior.
2114 */
2115 if ( VmcsField.n.fAccessType == VMX_VMCSFIELD_ACCESS_FULL
2116 && CPUMIsGuestVmxVmcsFieldValid(pVM, VmcsField.u))
2117 {
2118 /*
2119 * Read-only fields are placed in a separate array so that while syncing shadow
2120 * VMCS fields later (which is more performance critical) we can avoid branches.
2121 *
2122 * However, if the guest can write to all fields (including read-only fields),
2123 * we treat it as a read/write field. Otherwise, writing to these fields would
2124 * cause a VMWRITE instruction error while syncing the shadow VMCS.
2125 */
2126 if ( fGstVmwriteAll
2127 || !VMXIsVmcsFieldReadOnly(VmcsField.u))
2128 pVM->hmr0.s.vmx.paShadowVmcsFields[cRwFields++] = VmcsField.u;
2129 else
2130 pVM->hmr0.s.vmx.paShadowVmcsRoFields[cRoFields++] = VmcsField.u;
2131 }
2132 }
2133
2134 /* Update the counts. */
2135 pVM->hmr0.s.vmx.cShadowVmcsFields = cRwFields;
2136 pVM->hmr0.s.vmx.cShadowVmcsRoFields = cRoFields;
2137 return VINF_SUCCESS;
2138}
2139
2140
2141/**
2142 * Sets up the VMREAD and VMWRITE bitmaps.
2143 *
2144 * @param pVM The cross context VM structure.
2145 */
2146static void vmxHCSetupVmreadVmwriteBitmaps(PVMCC pVM)
2147{
2148 /*
2149 * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
2150 */
2151 uint32_t const cbBitmap = X86_PAGE_4K_SIZE;
2152 uint8_t *pbVmreadBitmap = (uint8_t *)pVM->hmr0.s.vmx.pvVmreadBitmap;
2153 uint8_t *pbVmwriteBitmap = (uint8_t *)pVM->hmr0.s.vmx.pvVmwriteBitmap;
2154 ASMMemFill32(pbVmreadBitmap, cbBitmap, UINT32_C(0xffffffff));
2155 ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
2156
2157 /*
2158 * Skip intercepting VMREAD/VMWRITE to guest read/write fields in the
2159 * VMREAD and VMWRITE bitmaps.
2160 */
2161 {
2162 uint32_t const *paShadowVmcsFields = pVM->hmr0.s.vmx.paShadowVmcsFields;
2163 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
2164 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
2165 {
2166 uint32_t const uVmcsField = paShadowVmcsFields[i];
2167 Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
2168 Assert(uVmcsField >> 3 < cbBitmap);
2169 ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
2170 ASMBitClear(pbVmwriteBitmap + (uVmcsField >> 3), uVmcsField & 7);
2171 }
2172 }
2173
2174 /*
2175 * Skip intercepting VMREAD for guest read-only fields in the VMREAD bitmap
2176 * if the host supports VMWRITE to all supported VMCS fields.
2177 */
2178 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
2179 {
2180 uint32_t const *paShadowVmcsRoFields = pVM->hmr0.s.vmx.paShadowVmcsRoFields;
2181 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
2182 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
2183 {
2184 uint32_t const uVmcsField = paShadowVmcsRoFields[i];
2185 Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
2186 Assert(uVmcsField >> 3 < cbBitmap);
2187 ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
2188 }
2189 }
2190}
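
/*
 * Illustrative sketch (not compiled, "#if 0"): how a VMCS field encoding indexes
 * into the 4K VMREAD/VMWRITE bitmaps above. The bit number is the field encoding
 * itself, so the byte is (encoding >> 3) and the bit within that byte is
 * (encoding & 7); the encoding used below is just an example input.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const uVmcsField = UINT32_C(0x00006800);   /* Example field encoding. */
    printf("byte %#05x, bit %u\n", (unsigned)(uVmcsField >> 3), (unsigned)(uVmcsField & 7));
    return 0;
}
#endif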
2191#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
2192
2193
2194/**
2195 * Sets up the APIC-access page address for the VMCS.
2196 *
2197 * @param pVCpu The cross context virtual CPU structure.
2198 */
2199DECLINLINE(void) vmxHCSetupVmcsApicAccessAddr(PVMCPUCC pVCpu)
2200{
2201 RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysApicAccess;
2202 Assert(HCPhysApicAccess != NIL_RTHCPHYS);
2203 Assert(!(HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2204 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
2205 AssertRC(rc);
2206}
2207
2208#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2209
2210/**
2211 * Sets up the VMREAD bitmap address for the VMCS.
2212 *
2213 * @param pVCpu The cross context virtual CPU structure.
2214 */
2215DECLINLINE(void) vmxHCSetupVmcsVmreadBitmapAddr(PVMCPUCC pVCpu)
2216{
2217 RTHCPHYS const HCPhysVmreadBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmreadBitmap;
2218 Assert(HCPhysVmreadBitmap != NIL_RTHCPHYS);
2219 Assert(!(HCPhysVmreadBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2220 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL, HCPhysVmreadBitmap);
2221 AssertRC(rc);
2222}
2223
2224
2225/**
2226 * Sets up the VMWRITE bitmap address for the VMCS.
2227 *
2228 * @param pVCpu The cross context virtual CPU structure.
2229 */
2230DECLINLINE(void) vmxHCSetupVmcsVmwriteBitmapAddr(PVMCPUCC pVCpu)
2231{
2232 RTHCPHYS const HCPhysVmwriteBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmwriteBitmap;
2233 Assert(HCPhysVmwriteBitmap != NIL_RTHCPHYS);
2234 Assert(!(HCPhysVmwriteBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2235 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL, HCPhysVmwriteBitmap);
2236 AssertRC(rc);
2237}
2238
2239#endif
2240
2241/**
2242 * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
2243 *
2244 * @param pVCpu The cross context virtual CPU structure.
2245 * @param pVmcsInfo The VMCS info. object.
2246 */
2247static void vmxHCSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2248{
2249 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
2250
2251 /*
2252 * By default, ensure guest attempts to access any MSR cause VM-exits.
2253 * This shall later be relaxed for specific MSRs as necessary.
2254 *
2255 * Note: For nested-guests, the entire bitmap will be merged prior to
2256 * executing the nested-guest using hardware-assisted VMX and hence there
2257 * is no need to perform this operation. See vmxHCMergeMsrBitmapNested.
2258 */
2259 Assert(pVmcsInfo->pvMsrBitmap);
2260 ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
2261
2262 /*
2263 * The guest can access the following MSRs (read, write) without causing
2264 * VM-exits; they are loaded/stored automatically using fields in the VMCS.
2265 */
2266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2267 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_CS, VMXMSRPM_ALLOW_RD_WR);
2268 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD_WR);
2269 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD_WR);
2270 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
2271 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_FS_BASE, VMXMSRPM_ALLOW_RD_WR);
2272
2273 /*
2274 * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
2275 * associated with them. We never need to intercept access (writes need to be
2276 * executed without causing a VM-exit, reads will #GP fault anyway).
2277 *
2278 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
2279 * read/write it. We swap the guest/host MSR values using the
2280 * auto-load/store MSR area.
2281 */
2282 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2283 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_PRED_CMD, VMXMSRPM_ALLOW_RD_WR);
2284 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
2285 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
2286 if (pVM->cpum.ro.GuestFeatures.fIbrs)
2287 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
2288
2289 /*
2290 * Allow full read/write access for the following MSRs (mandatory for VT-x)
2291 * required for 64-bit guests.
2292 */
2293 if (pVM->hmr0.s.fAllow64BitGuests)
2294 {
2295 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_LSTAR, VMXMSRPM_ALLOW_RD_WR);
2296 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K6_STAR, VMXMSRPM_ALLOW_RD_WR);
2297 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_SF_MASK, VMXMSRPM_ALLOW_RD_WR);
2298 vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
2299 }
2300
2301 /*
2302 * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
2303 */
2304#ifdef VBOX_STRICT
2305 Assert(pVmcsInfo->pvMsrBitmap);
2306 uint32_t const fMsrpmEfer = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K6_EFER);
2307 Assert(fMsrpmEfer == VMXMSRPM_EXIT_RD_WR);
2308#endif
2309}
2310
2311
2312/**
2313 * Sets up pin-based VM-execution controls in the VMCS.
2314 *
2315 * @returns VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure.
2317 * @param pVmcsInfo The VMCS info. object.
2318 */
2319static int vmxHCSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2320{
2321 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2322 uint32_t fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0; /* Bits set here must always be set. */
2323 uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2324
2325 fVal |= VMX_PIN_CTLS_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2326 | VMX_PIN_CTLS_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2327
2328 if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
2329 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2330
2331 /* Enable the VMX-preemption timer. */
2332 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
2333 {
2334 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
2335 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
2336 }
2337
2338#if 0
2339 /* Enable posted-interrupt processing. */
2340 if (pVM->hm.s.fPostedIntrs)
2341 {
2342 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
2343 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
2344 fVal |= VMX_PIN_CTLS_POSTED_INT;
2345 }
2346#endif
2347
2348 if ((fVal & fZap) != fVal)
2349 {
2350 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2351 g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
2352 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2353 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2354 }
2355
2356 /* Commit it to the VMCS and update our cache. */
2357 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
2358 AssertRC(rc);
2359 pVmcsInfo->u32PinCtls = fVal;
2360
2361 return VINF_SUCCESS;
2362}
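
/*
 * Illustrative sketch (not compiled, "#if 0"): the allowed-0/allowed-1 sanity check
 * used above and by the other control-setup routines. A bit set in allowed0 is
 * forced to 1 by the CPU; a bit clear in allowed1 is forced to 0. The values below
 * are made up to show a request the CPU cannot satisfy.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const fAllowed0 = UINT32_C(0x00000016);             /* Bits that must be 1. */
    uint32_t const fAllowed1 = UINT32_C(0x0000003e);             /* Bits that may be 1. */
    uint32_t const fVal      = fAllowed0 | UINT32_C(0x00000040); /* Requests a bit outside fAllowed1. */
    uint32_t const fZap      = fAllowed1;

    if ((fVal & fZap) != fVal)
        printf("unsupported control combination: fVal=%#x fZap=%#x\n", fVal, fZap);
    return 0;
}
#endif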
2363
2364
2365/**
2366 * Sets up secondary processor-based VM-execution controls in the VMCS.
2367 *
2368 * @returns VBox status code.
2369 * @param pVCpu The cross context virtual CPU structure.
2370 * @param pVmcsInfo The VMCS info. object.
2371 */
2372static int vmxHCSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2373{
2374 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2375 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
2376 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2377
2378 /* WBINVD causes a VM-exit. */
2379 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
2380 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
2381
2382 /* Enable EPT (aka nested-paging). */
2383 if (VM_IS_VMX_NESTED_PAGING(pVM))
2384 fVal |= VMX_PROC_CTLS2_EPT;
2385
2386 /* Enable the INVPCID instruction if we expose it to the guest and it is supported
2387 by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
2388 if ( pVM->cpum.ro.GuestFeatures.fInvpcid
2389 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
2390 fVal |= VMX_PROC_CTLS2_INVPCID;
2391
2392 /* Enable VPID. */
2393 if (pVM->hmr0.s.vmx.fVpid)
2394 fVal |= VMX_PROC_CTLS2_VPID;
2395
2396 /* Enable unrestricted guest execution. */
2397 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
2398 fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
2399
2400#if 0
2401 if (pVM->hm.s.fVirtApicRegs)
2402 {
2403 /* Enable APIC-register virtualization. */
2404 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
2405 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
2406
2407 /* Enable virtual-interrupt delivery. */
2408 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
2409 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
2410 }
2411#endif
2412
2413 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
2414 where the TPR shadow resides. */
2415 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2416 * done dynamically. */
2417 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
2418 {
2419 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
2420 vmxHCSetupVmcsApicAccessAddr(pVCpu);
2421 }
2422
2423 /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
2424 by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
2425 if ( pVM->cpum.ro.GuestFeatures.fRdTscP
2426 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
2427 fVal |= VMX_PROC_CTLS2_RDTSCP;
2428
2429 /* Enable Pause-Loop exiting. */
2430 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
2431 && pVM->hm.s.vmx.cPleGapTicks
2432 && pVM->hm.s.vmx.cPleWindowTicks)
2433 {
2434 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
2435
2436 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks); AssertRC(rc);
2437 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks); AssertRC(rc);
2438 }
2439
2440 if ((fVal & fZap) != fVal)
2441 {
2442 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2443 g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
2444 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2445 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2446 }
2447
2448 /* Commit it to the VMCS and update our cache. */
2449 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
2450 AssertRC(rc);
2451 pVmcsInfo->u32ProcCtls2 = fVal;
2452
2453 return VINF_SUCCESS;
2454}
2455
2456
2457/**
2458 * Sets up processor-based VM-execution controls in the VMCS.
2459 *
2460 * @returns VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure.
2462 * @param pVmcsInfo The VMCS info. object.
2463 */
2464static int vmxHCSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2465{
2466 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2467 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
2468 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2469
2470 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
2471 | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2472 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2473 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2474 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2475 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2476 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2477
2478 /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later, so check that the CPU does not force it to be -always- set or -always- cleared. */
2479 if ( !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
2480 || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
2481 {
2482 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2483 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2484 }
2485
2486 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2487 if (!VM_IS_VMX_NESTED_PAGING(pVM))
2488 {
2489 Assert(!VM_IS_VMX_UNRESTRICTED_GUEST(pVM));
2490 fVal |= VMX_PROC_CTLS_INVLPG_EXIT
2491 | VMX_PROC_CTLS_CR3_LOAD_EXIT
2492 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2493 }
2494
2495#ifdef IN_RING0
2496 /* Use TPR shadowing if supported by the CPU. */
2497 if ( PDMHasApic(pVM)
2498 && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
2499 {
2500 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2501 /* CR8 writes cause a VM-exit based on TPR threshold. */
2502 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
2503 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
2504 vmxHCSetupVmcsVirtApicAddr(pVmcsInfo);
2505 }
2506 else
2507 {
2508 /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
2509 invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
2510 if (pVM->hmr0.s.fAllow64BitGuests)
2511 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2512 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2513 }
2514
2515 /* Use MSR-bitmaps if supported by the CPU. */
2516 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2517 {
2518 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
2519 vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
2520 }
2521#endif
2522
2523 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2524 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2525 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
2526
2527 if ((fVal & fZap) != fVal)
2528 {
2529 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2530 g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
2531 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2532 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2533 }
2534
2535 /* Commit it to the VMCS and update our cache. */
2536 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2537 AssertRC(rc);
2538 pVmcsInfo->u32ProcCtls = fVal;
2539
2540 /* Set up MSR permissions that don't change through the lifetime of the VM. */
2541 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2542 vmxHCSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
2543
2544 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
2545 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2546 return vmxHCSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
2547
2548 /* Sanity check, should not really happen. */
2549 if (RT_LIKELY(!VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
2550 { /* likely */ }
2551 else
2552 {
2553 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INVALID_UX_COMBO;
2554 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2555 }
2556
2557 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
2558 return VINF_SUCCESS;
2559}
2560
2561
2562/**
2563 * Sets up miscellaneous (everything other than Pin, Processor and secondary
2564 * Processor-based VM-execution) control fields in the VMCS.
2565 *
2566 * @returns VBox status code.
2567 * @param pVCpu The cross context virtual CPU structure.
2568 * @param pVmcsInfo The VMCS info. object.
2569 */
2570static int vmxHCSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2571{
2572#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2573 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
2574 {
2575 vmxHCSetupVmcsVmreadBitmapAddr(pVCpu);
2576 vmxHCSetupVmcsVmwriteBitmapAddr(pVCpu);
2577 }
2578#endif
2579
2580 Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
2581 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
2582 AssertRC(rc);
2583
2584 rc = vmxHCSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
2585 if (RT_SUCCESS(rc))
2586 {
2587 uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
2588 uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
2589
2590 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); AssertRC(rc);
2591 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); AssertRC(rc);
2592
2593 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
2594 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
2595
2596 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
2597 {
2598 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
2599 AssertRC(rc);
2600 }
2601 return VINF_SUCCESS;
2602 }
2603 else
2604 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
2605 return rc;
2606}
2607
2608
2609/**
2610 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2611 *
2612 * We shall set up those exception intercepts that don't change during the
2613 * lifetime of the VM here. The rest are done dynamically while loading the
2614 * guest state.
2615 *
2616 * @param pVCpu The cross context virtual CPU structure.
2617 * @param pVmcsInfo The VMCS info. object.
2618 */
2619static void vmxHCSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2620{
2621 /*
2622 * The following exceptions are always intercepted:
2623 *
2624 * #AC - To prevent the guest from hanging the CPU and for dealing with
2625 * split-lock detecting host configs.
2626 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and
2627 * recursive #DBs can cause a CPU hang.
2628 * #PF - To sync our shadow page tables when nested-paging is not used.
2629 */
2630 bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
2631 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
2632 | RT_BIT(X86_XCPT_DB)
2633 | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
2634
2635 /* Commit it to the VMCS. */
2636 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2637 AssertRC(rc);
2638
2639 /* Update our cache of the exception bitmap. */
2640 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2641}
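
/*
 * Illustrative sketch (not compiled, "#if 0"): the exception-bitmap value the code
 * above produces when nested paging is disabled, spelled out with the exception
 * vector numbers (#DB = 1, #PF = 14, #AC = 17).
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint32_t const uXcptBitmap = (UINT32_C(1) << 17)    /* #AC */
                               | (UINT32_C(1) <<  1)    /* #DB */
                               | (UINT32_C(1) << 14);   /* #PF (only without nested paging) */
    printf("exception bitmap = %#x\n", uXcptBitmap);    /* 0x24002 */
    return 0;
}
#endif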
2642
2643
2644#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2645/**
2646 * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
2647 *
2648 * @returns VBox status code.
2649 * @param pVmcsInfo The VMCS info. object.
2650 */
2651static int vmxHCSetupVmcsCtlsNested(PVMXVMCSINFO pVmcsInfo)
2652{
2653 Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
2654 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
2655 AssertRC(rc);
2656
2657 rc = vmxHCSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
2658 if (RT_SUCCESS(rc))
2659 {
2660 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2661 vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
2662
2663 /* Paranoia - We've not yet initialized these, they shall be done while merging the VMCS. */
2664 Assert(!pVmcsInfo->u64Cr0Mask);
2665 Assert(!pVmcsInfo->u64Cr4Mask);
2666 return VINF_SUCCESS;
2667 }
2668 LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc));
2669 return rc;
2670}
2671#endif
2672#endif /* IN_RING0 */
2673
2674
2675/**
2676 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
2677 * VMCS.
2678 *
2679 * This is typically required when the guest changes paging mode.
2680 *
2681 * @returns VBox status code.
2682 * @param pVCpu The cross context virtual CPU structure.
2683 * @param pVmxTransient The VMX-transient structure.
2684 *
2685 * @remarks Requires EFER.
2686 * @remarks No-long-jump zone!!!
2687 */
2688static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2689{
2690 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
2691 {
2692 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2693 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2694
2695 /*
2696 * VM-entry controls.
2697 */
2698 {
2699 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
2700 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2701
2702 /*
2703 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
2704 * The first VT-x capable CPUs only supported the 1-setting of this bit.
2705 *
2706 * For nested-guests, this is a mandatory VM-entry control. It's also
2707 * required because we do not want to leak host bits to the nested-guest.
2708 */
2709 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
2710
2711 /*
2712 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
2713 *
2714 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
2715 * required to get the nested-guest working with hardware-assisted VMX execution.
2716 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
2717 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
2718 * here rather than while merging the guest VMCS controls.
2719 */
2720 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
2721 {
2722 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
2723 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2724 }
2725 else
2726 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
2727
2728 /*
2729 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
2730 *
2731 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
2732 * regardless of whether the nested-guest VMCS specifies it because we are free to
2733 * load whatever MSRs we require and we do not need to modify the guest visible copy
2734 * of the VM-entry MSR load area.
2735 */
2736 if ( g_fHmVmxSupportsVmcsEfer
2737 && vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
2738 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
2739 else
2740 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
2741
2742 /*
2743 * The following should -not- be set (since we're not in SMM mode):
2744 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
2745 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
2746 */
2747
2748 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
2749 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
2750
2751 if ((fVal & fZap) == fVal)
2752 { /* likely */ }
2753 else
2754 {
2755 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2756 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
2757 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
2758 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2759 }
2760
2761 /* Commit it to the VMCS. */
2762 if (pVmcsInfo->u32EntryCtls != fVal)
2763 {
2764 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, fVal);
2765 AssertRC(rc);
2766 pVmcsInfo->u32EntryCtls = fVal;
2767 }
2768 }
2769
2770 /*
2771 * VM-exit controls.
2772 */
2773 {
2774 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
2775 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2776
2777 /*
2778 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
2779 * supported the 1-setting of this bit.
2780 *
2781 * For nested-guests, we set the "save debug controls" as the converse
2782 * "load debug controls" is mandatory for nested-guests anyway.
2783 */
2784 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
2785
2786 /*
2787 * Set the host long mode active (EFER.LMA) bit (which Intel calls
2788 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
2789 * host EFER.LMA and EFER.LME bits to this value. See assertion in
2790 * vmxHCExportHostMsrs().
2791 *
2792 * For nested-guests, we always set this bit as we do not support 32-bit
2793 * hosts.
2794 */
2795 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
2796
2797#ifdef IN_RING0
2798 /*
2799 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
2800 *
2801 * For nested-guests, we should use the "save IA32_EFER" control if we also
2802 * used the "load IA32_EFER" control while exporting VM-entry controls.
2803 */
2804 if ( g_fHmVmxSupportsVmcsEfer
2805 && vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
2806 {
2807 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
2808 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
2809 }
2810#endif
2811
2812 /*
2813 * Enable saving of the VMX-preemption timer value on VM-exit.
2814 * For nested-guests, currently not exposed/used.
2815 */
2816 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
2817 * the timer value. */
2818 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
2819 {
2820 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
2821 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
2822 }
2823
2824 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2825 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
2826
2827 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
2828 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
2829 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
2830
2831 if ((fVal & fZap) == fVal)
2832 { /* likely */ }
2833 else
2834 {
2835 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2836 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
2837 VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
2838 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2839 }
2840
2841 /* Commit it to the VMCS. */
2842 if (pVmcsInfo->u32ExitCtls != fVal)
2843 {
2844 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT, fVal);
2845 AssertRC(rc);
2846 pVmcsInfo->u32ExitCtls = fVal;
2847 }
2848 }
2849
2850 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
2851 }
2852 return VINF_SUCCESS;
2853}
2854
2855
2856/**
2857 * Sets the TPR threshold in the VMCS.
2858 *
2859 * @param pVCpu The cross context virtual CPU structure.
2860 * @param pVmcsInfo The VMCS info. object.
2861 * @param u32TprThreshold The TPR threshold (task-priority class only).
2862 */
2863DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
2864{
2865 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
2866 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
2867 RT_NOREF(pVmcsInfo);
2868 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2869 AssertRC(rc);
2870}
2871
2872
2873/**
2874 * Exports the guest APIC TPR state into the VMCS.
2875 *
2876 * @param pVCpu The cross context virtual CPU structure.
2877 * @param pVmxTransient The VMX-transient structure.
2878 *
2879 * @remarks No-long-jump zone!!!
2880 */
2881static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
2882{
2883 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
2884 {
2885 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
2886
2887 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2888 if (!pVmxTransient->fIsNestedGuest)
2889 {
2890 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
2891 && APICIsEnabled(pVCpu))
2892 {
2893 /*
2894 * Setup TPR shadowing.
2895 */
2896 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
2897 {
2898 bool fPendingIntr = false;
2899 uint8_t u8Tpr = 0;
2900 uint8_t u8PendingIntr = 0;
2901 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
2902 AssertRC(rc);
2903
2904 /*
2905 * If there are interrupts pending but masked by the TPR, instruct VT-x to
2906 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
2907 * priority of the pending interrupt so we can deliver the interrupt. If there
2908 * are no interrupts pending, set threshold to 0 to not cause any
2909 * TPR-below-threshold VM-exits.
2910 */
2911 uint32_t u32TprThreshold = 0;
2912 if (fPendingIntr)
2913 {
2914 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
2915 (which is the Task-Priority Class). */
2916 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
2917 const uint8_t u8TprPriority = u8Tpr >> 4;
2918 if (u8PendingPriority <= u8TprPriority)
2919 u32TprThreshold = u8PendingPriority;
2920 }
2921
2922 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
2923 }
2924 }
2925 }
2926 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
2927 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
2928 }
2929}
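
/*
 * Illustrative sketch (not compiled, "#if 0"): the priority-class comparison used
 * above to pick the TPR threshold. The TPR and pending vector below are made-up
 * inputs; bits 7:4 give the priority class in both cases.
 */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint8_t const u8Tpr             = 0x80;             /* Guest TPR: class 8. */
    uint8_t const u8PendingIntr     = 0x51;             /* Pending vector 0x51: class 5. */
    uint8_t const u8PendingPriority = u8PendingIntr >> 4;
    uint8_t const u8TprPriority     = u8Tpr >> 4;

    uint32_t u32TprThreshold = 0;
    if (u8PendingPriority <= u8TprPriority)
        u32TprThreshold = u8PendingPriority;            /* VM-exit once the guest drops its TPR class below 5. */

    printf("TPR threshold = %u\n", u32TprThreshold);
    return 0;
}
#endif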
2930
2931
2932/**
2933 * Gets the guest interruptibility-state and updates related force-flags.
2934 *
2935 * @returns Guest's interruptibility-state.
2936 * @param pVCpu The cross context virtual CPU structure.
2937 *
2938 * @remarks No-long-jump zone!!!
2939 */
2940static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
2941{
2942 /*
2943 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
2944 */
2945 uint32_t fIntrState = 0;
2946 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2947 {
2948 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
2949 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
2950
2951 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2952 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
2953 {
2954 if (pCtx->eflags.Bits.u1IF)
2955 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
2956 else
2957 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
2958 }
2959 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2960 {
2961 /*
 2962 * We can clear the inhibit force flag here: even if we go back to the recompiler
 2963 * without executing guest code in VT-x, the condition for clearing the flag has
 2964 * already been met and thus the cleared state is correct.
2965 */
2966 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2967 }
2968 }
2969
2970 /*
2971 * Check if we should inhibit NMI delivery.
2972 */
2973 if (CPUMIsGuestNmiBlocking(pVCpu))
2974 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
2975
2976 /*
2977 * Validate.
2978 */
2979#ifdef VBOX_STRICT
 2980 /* We don't support block-by-SMI yet. */
2981 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
2982
2983 /* Block-by-STI must not be set when interrupts are disabled. */
2984 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
2985 {
2986 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2987 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
2988 }
2989#endif
2990
2991 return fIntrState;
2992}
2993
2994
2995/**
2996 * Exports the exception intercepts required for guest execution in the VMCS.
2997 *
2998 * @param pVCpu The cross context virtual CPU structure.
2999 * @param pVmxTransient The VMX-transient structure.
3000 *
3001 * @remarks No-long-jump zone!!!
3002 */
3003static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
3004{
3005 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
3006 {
3007 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
3008 if ( !pVmxTransient->fIsNestedGuest
3009 && VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
3010 vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
3011 else
3012 vmxHCRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
3013
3014 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
3015 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
3016 }
3017}
3018
3019
3020/**
3021 * Exports the guest's RIP into the guest-state area in the VMCS.
3022 *
3023 * @param pVCpu The cross context virtual CPU structure.
3024 *
3025 * @remarks No-long-jump zone!!!
3026 */
3027static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
3028{
3029 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
3030 {
3031 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
3032
3033 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
3034 AssertRC(rc);
3035
3036 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
3037 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
3038 }
3039}
3040
3041
3042/**
3043 * Exports the guest's RSP into the guest-state area in the VMCS.
3044 *
3045 * @param pVCpu The cross context virtual CPU structure.
3046 *
3047 * @remarks No-long-jump zone!!!
3048 */
3049static void vmxHCExportGuestRsp(PVMCPUCC pVCpu)
3050{
3051 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RSP)
3052 {
3053 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
3054
3055 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
3056 AssertRC(rc);
3057
3058 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RSP);
3059 Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
3060 }
3061}
3062
3063
3064/**
3065 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
3066 *
3067 * @param pVCpu The cross context virtual CPU structure.
3068 * @param pVmxTransient The VMX-transient structure.
3069 *
3070 * @remarks No-long-jump zone!!!
3071 */
3072static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
3073{
3074 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
3075 {
3076 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
3077
3078 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3079 Let us assert it as such and use 32-bit VMWRITE. */
3080 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
3081 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
3082 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
3083 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3084
3085#ifdef IN_RING0
3086 /*
3087 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
3088 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
3089 * can run the real-mode guest code under Virtual 8086 mode.
3090 */
3091 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
3092 if (pVmcsInfo->RealMode.fRealOnV86Active)
3093 {
3094 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3095 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3096 Assert(!pVmxTransient->fIsNestedGuest);
3097 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
3098 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3099 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
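            /* E.g. real-mode eflags 0x00000202 is saved unmodified above, while 0x00020202 (VM=1,
               IOPL=0) is what ends up being loaded into the VMCS for the virtual-8086 run. */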
3100 }
3101#else
3102 RT_NOREF(pVmxTransient);
3103#endif
3104
3105 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
3106 AssertRC(rc);
3107
3108 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
3109 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
3110 }
3111}
3112
3113
3114#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3115/**
3116 * Copies the nested-guest VMCS to the shadow VMCS.
3117 *
3118 * @returns VBox status code.
3119 * @param pVCpu The cross context virtual CPU structure.
3120 * @param pVmcsInfo The VMCS info. object.
3121 *
3122 * @remarks No-long-jump zone!!!
3123 */
3124static int vmxHCCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3125{
3126 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3127 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3128
3129 /*
3130 * Disable interrupts so we don't get preempted while the shadow VMCS is the
3131 * current VMCS, as we may try saving guest lazy MSRs.
3132 *
 3133 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk calling the
 3134 * VMCS import code, which currently performs the guest MSR reads (on 64-bit hosts), accesses
 3135 * the auto-load/store MSR area (on 32-bit hosts) and runs the rest of the VMX leave-session
 3136 * machinery.
3137 */
3138 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
3139
3140 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
3141 if (RT_SUCCESS(rc))
3142 {
3143 /*
3144 * Copy all guest read/write VMCS fields.
3145 *
3146 * We don't check for VMWRITE failures here for performance reasons and
3147 * because they are not expected to fail, barring irrecoverable conditions
3148 * like hardware errors.
3149 */
3150 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
3151 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
3152 {
3153 uint64_t u64Val;
3154 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
3155 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
3156 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
3157 }
3158
3159 /*
3160 * If the host CPU supports writing all VMCS fields, copy the guest read-only
3161 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
3162 */
3163 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
3164 {
3165 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
3166 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
3167 {
3168 uint64_t u64Val;
3169 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
3170 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
3171 VMX_VMCS_WRITE_64(pVCpu, uVmcsField, u64Val);
3172 }
3173 }
3174
3175 rc = vmxHCClearShadowVmcs(pVmcsInfo);
3176 rc |= vmxHCLoadVmcs(pVmcsInfo);
3177 }
3178
3179 ASMSetFlags(fEFlags);
3180 return rc;
3181}
3182
3183
3184/**
3185 * Copies the shadow VMCS to the nested-guest VMCS.
3186 *
3187 * @returns VBox status code.
3188 * @param pVCpu The cross context virtual CPU structure.
3189 * @param pVmcsInfo The VMCS info. object.
3190 *
3191 * @remarks Called with interrupts disabled.
3192 */
3193static int vmxHCCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3194{
3195 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3196 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3197 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3198
3199 int rc = vmxHCLoadShadowVmcs(pVmcsInfo);
3200 if (RT_SUCCESS(rc))
3201 {
3202 /*
3203 * Copy guest read/write fields from the shadow VMCS.
3204 * Guest read-only fields cannot be modified, so no need to copy them.
3205 *
3206 * We don't check for VMREAD failures here for performance reasons and
3207 * because they are not expected to fail, barring irrecoverable conditions
3208 * like hardware errors.
3209 */
3210 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
3211 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
3212 {
3213 uint64_t u64Val;
3214 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
3215 VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
3216 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
3217 }
3218
3219 rc = vmxHCClearShadowVmcs(pVmcsInfo);
3220 rc |= vmxHCLoadVmcs(pVmcsInfo);
3221 }
3222 return rc;
3223}
3224
3225
3226/**
3227 * Enables VMCS shadowing for the given VMCS info. object.
3228 *
3229 * @param pVmcsInfo The VMCS info. object.
3230 *
3231 * @remarks No-long-jump zone!!!
3232 */
3233static void vmxHCEnableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3234{
3235 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
3236 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
3237 {
3238 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
3239 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
3240 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
3241 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
3242 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
3243 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
3244 Log4Func(("Enabled\n"));
3245 }
3246}
3247
3248
3249/**
3250 * Disables VMCS shadowing for the given VMCS info. object.
3251 *
3252 * @param pVmcsInfo The VMCS info. object.
3253 *
3254 * @remarks No-long-jump zone!!!
3255 */
3256static void vmxHCDisableVmcsShadowing(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3257{
3258 /*
3259 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
3260 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
3261 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
3262 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
3263 *
3264 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
3265 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
3266 */
3267 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
3268 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3269 {
3270 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
3271 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
3272 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
3273 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
3274 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
3275 Log4Func(("Disabled\n"));
3276 }
3277}
3278#endif
3279
3280
3281/**
3282 * Exports the guest hardware-virtualization state.
3283 *
3284 * @returns VBox status code.
3285 * @param pVCpu The cross context virtual CPU structure.
3286 * @param pVmxTransient The VMX-transient structure.
3287 *
3288 * @remarks No-long-jump zone!!!
3289 */
3290static int vmxHCExportGuestHwvirtState(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
3291{
3292 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
3293 {
3294#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3295 /*
3296 * Check if the VMX feature is exposed to the guest and if the host CPU supports
3297 * VMCS shadowing.
3298 */
3299 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
3300 {
3301 /*
3302 * If the nested hypervisor has loaded a current VMCS and is in VMX root mode,
3303 * copy the nested hypervisor's current VMCS into the shadow VMCS and enable
3304 * VMCS shadowing to skip intercepting some or all VMREAD/VMWRITE VM-exits.
3305 *
3306 * We check for VMX root mode here in case the guest executes VMXOFF without
3307 * clearing the current VMCS pointer and our VMXOFF instruction emulation does
3308 * not clear the current VMCS pointer.
3309 */
3310 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
3311 if ( CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx)
3312 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
3313 && CPUMIsGuestVmxCurrentVmcsValid(&pVCpu->cpum.GstCtx))
3314 {
3315 /* Paranoia. */
3316 Assert(!pVmxTransient->fIsNestedGuest);
3317
3318 /*
3319 * For performance reasons, also check if the nested hypervisor's current VMCS
3320 * was newly loaded or modified before copying it to the shadow VMCS.
3321 */
3322 if (!VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs)
3323 {
3324 int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
3325 AssertRCReturn(rc, rc);
3326 VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs = true;
3327 }
 3328 vmxHCEnableVmcsShadowing(pVCpu, pVmcsInfo);
3329 }
3330 else
 3331 vmxHCDisableVmcsShadowing(pVCpu, pVmcsInfo);
3332 }
3333#else
3334 NOREF(pVmxTransient);
3335#endif
3336 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
3337 }
3338 return VINF_SUCCESS;
3339}
3340
3341
3342/**
3343 * Exports the guest CR0 control register into the guest-state area in the VMCS.
3344 *
 3345 * The guest FPU state is always pre-loaded, hence we don't need to bother about
 3346 * sharing FPU-related CR0 bits between the guest and host.
3347 *
3348 * @returns VBox status code.
3349 * @param pVCpu The cross context virtual CPU structure.
3350 * @param pVmxTransient The VMX-transient structure.
3351 *
3352 * @remarks No-long-jump zone!!!
3353 */
3354static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
3355{
3356 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
3357 {
3358 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3359 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
3360
3361 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
3362 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
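        /* The fixed MSRs encode which CR0 bits the CPU insists on while in VMX operation: bits set in
           Cr0Fixed0 must be 1 and bits clear in Cr0Fixed1 must be 0. On most CPUs Cr0Fixed0 demands
           PE, NE and PG while Cr0Fixed1 reads 0xffffffff (i.e. nothing is forced to zero). */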
3363 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
3364 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
3365 else
3366 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3367
3368 if (!pVmxTransient->fIsNestedGuest)
3369 {
3370 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3371 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
3372 uint64_t const u64ShadowCr0 = u64GuestCr0;
3373 Assert(!RT_HI_U32(u64GuestCr0));
3374
3375 /*
3376 * Setup VT-x's view of the guest CR0.
3377 */
3378 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
3379 if (VM_IS_VMX_NESTED_PAGING(pVM))
3380 {
3381 if (CPUMIsGuestPagingEnabled(pVCpu))
3382 {
3383 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3384 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
3385 | VMX_PROC_CTLS_CR3_STORE_EXIT);
3386 }
3387 else
3388 {
3389 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3390 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
3391 | VMX_PROC_CTLS_CR3_STORE_EXIT;
3392 }
3393
3394 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3395 if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
3396 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
3397 }
3398 else
3399 {
3400 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3401 u64GuestCr0 |= X86_CR0_WP;
3402 }
3403
3404 /*
3405 * Guest FPU bits.
3406 *
3407 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
3408 * using CR0.TS.
3409 *
 3410 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set
 3411 * on the first CPUs to support VT-x; no mention is made of relaxing this for UX in the VM-entry checks.
3412 */
3413 u64GuestCr0 |= X86_CR0_NE;
3414
3415 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
3416 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
3417
3418 /*
3419 * Update exception intercepts.
3420 */
3421 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
3422#ifdef IN_RING0
3423 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3424 {
3425 Assert(PDMVmmDevHeapIsEnabled(pVM));
3426 Assert(pVM->hm.s.vmx.pRealModeTSS);
3427 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3428 }
3429 else
3430#endif
3431 {
3432 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
3433 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3434 if (fInterceptMF)
3435 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
3436 }
3437
 3438 /* Additional intercepts for debugging; define these yourself explicitly. */
3439#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3440 uXcptBitmap |= 0
3441 | RT_BIT(X86_XCPT_BP)
3442 | RT_BIT(X86_XCPT_DE)
3443 | RT_BIT(X86_XCPT_NM)
3444 | RT_BIT(X86_XCPT_TS)
3445 | RT_BIT(X86_XCPT_UD)
3446 | RT_BIT(X86_XCPT_NP)
3447 | RT_BIT(X86_XCPT_SS)
3448 | RT_BIT(X86_XCPT_GP)
3449 | RT_BIT(X86_XCPT_PF)
3450 | RT_BIT(X86_XCPT_MF)
3451 ;
3452#elif defined(HMVMX_ALWAYS_TRAP_PF)
3453 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
3454#endif
3455 if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
3456 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
3457 Assert(VM_IS_VMX_NESTED_PAGING(pVM) || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
3458
3459 /* Apply the hardware specified CR0 fixed bits and enable caching. */
3460 u64GuestCr0 |= fSetCr0;
3461 u64GuestCr0 &= fZapCr0;
3462 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
3463
3464 /* Commit the CR0 and related fields to the guest VMCS. */
3465 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
3466 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
3467 if (uProcCtls != pVmcsInfo->u32ProcCtls)
3468 {
3469 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
3470 AssertRC(rc);
3471 }
3472 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
3473 {
3474 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3475 AssertRC(rc);
3476 }
3477
3478 /* Update our caches. */
3479 pVmcsInfo->u32ProcCtls = uProcCtls;
3480 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
3481
3482 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
3483 }
3484 else
3485 {
3486 /*
3487 * With nested-guests, we may have extended the guest/host mask here since we
3488 * merged in the outer guest's mask. Thus, the merged mask can include more bits
3489 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
3490 * originally supplied. We must copy those bits from the nested-guest CR0 into
3491 * the nested-guest CR0 read-shadow.
3492 */
3493 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3494 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
3495 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
3496 Assert(!RT_HI_U32(u64GuestCr0));
3497 Assert(u64GuestCr0 & X86_CR0_NE);
3498
3499 /* Apply the hardware specified CR0 fixed bits and enable caching. */
3500 u64GuestCr0 |= fSetCr0;
3501 u64GuestCr0 &= fZapCr0;
3502 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
3503
3504 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
3505 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
3506 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
3507
3508 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
3509 }
3510
3511 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
3512 }
3513
3514 return VINF_SUCCESS;
3515}
3516
3517
3518/**
3519 * Exports the guest control registers (CR3, CR4) into the guest-state area
3520 * in the VMCS.
3521 *
3522 * @returns VBox strict status code.
3523 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
3524 * without unrestricted guest access and the VMMDev is not presently
3525 * mapped (e.g. EFI32).
3526 *
3527 * @param pVCpu The cross context virtual CPU structure.
3528 * @param pVmxTransient The VMX-transient structure.
3529 *
3530 * @remarks No-long-jump zone!!!
3531 */
3532static VBOXSTRICTRC vmxHCExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
3533{
3534 int rc = VINF_SUCCESS;
3535 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3536
3537 /*
3538 * Guest CR2.
3539 * It's always loaded in the assembler code. Nothing to do here.
3540 */
3541
3542 /*
3543 * Guest CR3.
3544 */
3545 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
3546 {
3547 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3548
3549 if (VM_IS_VMX_NESTED_PAGING(pVM))
3550 {
3551#ifdef IN_RING0
3552 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
3553 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3554
3555 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3556 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
3557 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3558 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
3559
3560 /* VMX_EPT_MEMTYPE_WB support is already checked in vmxHCSetupTaggedTlb(). */
3561 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
3562 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
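            /* With the write-back memory type (6) and a 4-level page walk this sets the low EPTP bits
               to 0x1e: bits 2:0 hold the memory type and bits 5:3 the page-walk length minus one,
               which is what the assertions below verify. */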
3563
3564 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3565 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3566 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
3567 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
3568 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
3569 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
3570 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
3571
3572 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
3573 AssertRC(rc);
3574#endif
3575
3576 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3577 uint64_t u64GuestCr3 = pCtx->cr3;
3578 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3579 || CPUMIsGuestPagingEnabledEx(pCtx))
3580 {
3581 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3582 if (CPUMIsGuestInPAEModeEx(pCtx))
3583 {
3584 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
3585 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
3586 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
3587 rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
3588 }
3589
3590 /*
 3591 * With nested paging, the guest's view of its CR3 remains unblemished: either the
 3592 * guest is using paging, or we have unrestricted guest execution to handle the
 3593 * case where it's not using paging.
3594 */
3595 }
3596#ifdef IN_RING0
3597 else
3598 {
3599 /*
3600 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
3601 * thinks it accesses physical memory directly, we use our identity-mapped
3602 * page table to map guest-linear to guest-physical addresses. EPT takes care
3603 * of translating it to host-physical addresses.
3604 */
3605 RTGCPHYS GCPhys;
3606 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3607
3608 /* We obtain it here every time as the guest could have relocated this PCI region. */
3609 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3610 if (RT_SUCCESS(rc))
3611 { /* likely */ }
3612 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
3613 {
3614 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
3615 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
3616 }
3617 else
3618 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
3619
3620 u64GuestCr3 = GCPhys;
3621 }
3622#endif
3623
3624 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
3625 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, u64GuestCr3);
3626 AssertRC(rc);
3627 }
3628 else
3629 {
3630 Assert(!pVmxTransient->fIsNestedGuest);
3631 /* Non-nested paging case, just use the hypervisor's CR3. */
3632 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
3633
3634 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
3635 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
3636 AssertRC(rc);
3637 }
3638
3639 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
3640 }
3641
3642 /*
3643 * Guest CR4.
 3644 * ASSUMES this is done every time we get in from ring-3! (XCR0)
3645 */
3646 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
3647 {
3648 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3649 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
3650
3651 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
3652 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
3653
3654 /*
3655 * With nested-guests, we may have extended the guest/host mask here (since we
3656 * merged in the outer guest's mask, see vmxHCMergeVmcsNested). This means, the
3657 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
3658 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
3659 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
3660 */
3661 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3662 uint64_t u64GuestCr4 = pCtx->cr4;
3663 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
3664 ? pCtx->cr4
3665 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
3666 Assert(!RT_HI_U32(u64GuestCr4));
3667
3668#ifdef IN_RING0
3669 /*
3670 * Setup VT-x's view of the guest CR4.
3671 *
3672 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
3673 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
3674 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3675 *
3676 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3677 */
3678 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
3679 {
3680 Assert(pVM->hm.s.vmx.pRealModeTSS);
3681 Assert(PDMVmmDevHeapIsEnabled(pVM));
3682 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
3683 }
3684#endif
3685
3686 if (VM_IS_VMX_NESTED_PAGING(pVM))
3687 {
3688 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
3689 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
3690 {
3691 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3692 u64GuestCr4 |= X86_CR4_PSE;
3693 /* Our identity mapping is a 32-bit page directory. */
3694 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
3695 }
3696 /* else use guest CR4.*/
3697 }
3698 else
3699 {
3700 Assert(!pVmxTransient->fIsNestedGuest);
3701
3702 /*
 3703 * The shadow paging mode and the guest paging mode can differ; the shadow follows the host
 3704 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
3705 */
3706 switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
3707 {
3708 case PGMMODE_REAL: /* Real-mode. */
3709 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3710 case PGMMODE_32_BIT: /* 32-bit paging. */
3711 {
3712 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
3713 break;
3714 }
3715
3716 case PGMMODE_PAE: /* PAE paging. */
3717 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3718 {
3719 u64GuestCr4 |= X86_CR4_PAE;
3720 break;
3721 }
3722
3723 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3724 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3725 {
3726#ifdef VBOX_WITH_64_BITS_GUESTS
3727 /* For our assumption in vmxHCShouldSwapEferMsr. */
3728 Assert(u64GuestCr4 & X86_CR4_PAE);
3729 break;
3730#endif
3731 }
3732 default:
3733 AssertFailed();
3734 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3735 }
3736 }
3737
3738 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
3739 u64GuestCr4 |= fSetCr4;
3740 u64GuestCr4 &= fZapCr4;
3741
3742 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
3743 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
3744 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
3745
3746#ifdef IN_RING0
3747 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
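        /* If CR4.OSXSAVE is clear the guest cannot execute XSETBV/XSAVE, and if its XCR0 matches the
           host value there is nothing to switch; only when the guest has OSXSAVE enabled and a different
           XCR0 do we have the world-switch code swap XCR0 (see vmxHCUpdateStartVmFunction() below). */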
3748 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
3749 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
3750 {
3751 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
3752 vmxHCUpdateStartVmFunction(pVCpu);
3753 }
3754#endif
3755
3756 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
3757
3758 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
3759 }
3760 return rc;
3761}
3762
3763
3764/**
3765 * Exports the guest debug registers into the guest-state area in the VMCS.
3766 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
3767 *
3768 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
3769 *
3770 * @returns VBox status code.
3771 * @param pVCpu The cross context virtual CPU structure.
3772 * @param pVmxTransient The VMX-transient structure.
3773 *
3774 * @remarks No-long-jump zone!!!
3775 */
3776static int vmxHCExportSharedDebugState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
3777{
3778#ifdef IN_RING0
3779 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3780#endif
3781
3782 /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
3783 * stepping. */
3784 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
3785#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3786 if (pVmxTransient->fIsNestedGuest)
3787 {
3788 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
3789 AssertRC(rc);
3790
3791 /*
3792 * We don't want to always intercept MOV DRx for nested-guests as it causes
3793 * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
3794 * Instead, they are strictly only requested when the nested hypervisor intercepts
3795 * them -- handled while merging VMCS controls.
3796 *
 3797 * If neither the outer hypervisor nor the nested hypervisor is intercepting MOV DRx,
 3798 * then the nested-guest debug state should be actively loaded on the host so that the
 3799 * nested-guest reads its own debug registers without causing VM-exits.
3800 */
3801 if ( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3802 && !CPUMIsGuestDebugStateActive(pVCpu))
3803 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
3804 return VINF_SUCCESS;
3805 }
3806#endif
3807
3808#ifdef VBOX_STRICT
3809 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3810 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
3811 {
3812 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3813 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
3814 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
3815 }
3816#endif
3817
3818#ifdef IN_RING0 /** @todo */
3819 bool fSteppingDB = false;
3820 bool fInterceptMovDRx = false;
3821 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
3822 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
3823 {
3824 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3825 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
3826 {
3827 uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
3828 Assert(fSteppingDB == false);
3829 }
3830 else
3831 {
3832 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
3833 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
3834 pVCpu->hmr0.s.fClearTrapFlag = true;
3835 fSteppingDB = true;
3836 }
3837 }
3838
3839 uint64_t u64GuestDr7;
3840 if ( fSteppingDB
3841 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
3842 {
3843 /*
3844 * Use the combined guest and host DRx values found in the hypervisor register set
3845 * because the hypervisor debugger has breakpoints active or someone is single stepping
3846 * on the host side without a monitor trap flag.
3847 *
3848 * Note! DBGF expects a clean DR6 state before executing guest code.
3849 */
3850 if (!CPUMIsHyperDebugStateActive(pVCpu))
3851 {
3852 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
3853 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3854 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
3855 }
3856
3857 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
3858 u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
3859 pVCpu->hmr0.s.fUsingHyperDR7 = true;
3860 fInterceptMovDRx = true;
3861 }
3862 else
3863 {
3864 /*
3865 * If the guest has enabled debug registers, we need to load them prior to
3866 * executing guest code so they'll trigger at the right time.
3867 */
3868 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3869 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3870 {
3871 if (!CPUMIsGuestDebugStateActive(pVCpu))
3872 {
3873 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
3874 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3875 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
3876 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxArmed);
3877 }
3878 Assert(!fInterceptMovDRx);
3879 }
3880 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3881 {
3882 /*
 3883 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
3884 * must intercept #DB in order to maintain a correct DR6 guest value, and
3885 * because we need to intercept it to prevent nested #DBs from hanging the
3886 * CPU, we end up always having to intercept it. See vmxHCSetupVmcsXcptBitmap().
3887 */
3888 fInterceptMovDRx = true;
3889 }
3890
3891 /* Update DR7 with the actual guest value. */
3892 u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
3893 pVCpu->hmr0.s.fUsingHyperDR7 = false;
3894 }
3895
3896 if (fInterceptMovDRx)
3897 uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
3898 else
3899 uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
3900
3901 /*
3902 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
3903 * monitor-trap flag and update our cache.
3904 */
3905 if (uProcCtls != pVmcsInfo->u32ProcCtls)
3906 {
3907 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
3908 AssertRC(rc);
3909 pVmcsInfo->u32ProcCtls = uProcCtls;
3910 }
3911
3912 /*
3913 * Update guest DR7.
3914 */
3915 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, u64GuestDr7);
3916 AssertRC(rc);
3917
3918 /*
3919 * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
3920 * we need to clear interrupt inhibition if any as otherwise it causes a VM-entry failure.
3921 *
3922 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
3923 */
3924 if (fSteppingDB)
3925 {
3926 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
3927 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
3928
3929 uint32_t fIntrState = 0;
3930 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
3931 AssertRC(rc);
3932
3933 if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3934 {
3935 fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
3936 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
3937 AssertRC(rc);
3938 }
3939 }
3940#endif /* IN_RING0 */
3941
3942 return VINF_SUCCESS;
3943}
3944
3945
3946#ifdef VBOX_STRICT
3947/**
3948 * Strict function to validate segment registers.
3949 *
3950 * @param pVCpu The cross context virtual CPU structure.
3951 * @param pVmcsInfo The VMCS info. object.
3952 *
3953 * @remarks Will import guest CR0 on strict builds during validation of
3954 * segments.
3955 */
3956static void vmxHCValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3957{
3958 /*
3959 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
3960 *
3961 * The reason we check for attribute value 0 in this function and not just the unusable bit is
3962 * because vmxHCExportGuestSegReg() only updates the VMCS' copy of the value with the
3963 * unusable bit and doesn't change the guest-context value.
3964 */
3965 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3966 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3967 vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
3968 if ( !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
3969 && ( !CPUMIsGuestInRealModeEx(pCtx)
3970 && !CPUMIsGuestInV86ModeEx(pCtx)))
3971 {
3972 /* Protected mode checks */
3973 /* CS */
3974 Assert(pCtx->cs.Attr.n.u1Present);
3975 Assert(!(pCtx->cs.Attr.u & 0xf00));
3976 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3977 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3978 || !(pCtx->cs.Attr.n.u1Granularity));
3979 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3980 || (pCtx->cs.Attr.n.u1Granularity));
3981 /* CS cannot be loaded with NULL in protected mode. */
3982 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
3983 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3984 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3985 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3986 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3987 else
 3988 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3989 /* SS */
3990 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3991 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3992 if ( !(pCtx->cr0 & X86_CR0_PE)
3993 || pCtx->cs.Attr.n.u4Type == 3)
3994 {
3995 Assert(!pCtx->ss.Attr.n.u2Dpl);
3996 }
3997 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
3998 {
3999 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4000 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4001 Assert(pCtx->ss.Attr.n.u1Present);
4002 Assert(!(pCtx->ss.Attr.u & 0xf00));
4003 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4004 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4005 || !(pCtx->ss.Attr.n.u1Granularity));
4006 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4007 || (pCtx->ss.Attr.n.u1Granularity));
4008 }
4009 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSegReg(). */
4010 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4011 {
4012 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4013 Assert(pCtx->ds.Attr.n.u1Present);
4014 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4015 Assert(!(pCtx->ds.Attr.u & 0xf00));
4016 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4017 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4018 || !(pCtx->ds.Attr.n.u1Granularity));
4019 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4020 || (pCtx->ds.Attr.n.u1Granularity));
4021 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4022 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4023 }
4024 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4025 {
4026 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4027 Assert(pCtx->es.Attr.n.u1Present);
4028 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4029 Assert(!(pCtx->es.Attr.u & 0xf00));
4030 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4031 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4032 || !(pCtx->es.Attr.n.u1Granularity));
4033 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4034 || (pCtx->es.Attr.n.u1Granularity));
4035 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4036 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4037 }
4038 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4039 {
4040 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4041 Assert(pCtx->fs.Attr.n.u1Present);
4042 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4043 Assert(!(pCtx->fs.Attr.u & 0xf00));
4044 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4045 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4046 || !(pCtx->fs.Attr.n.u1Granularity));
4047 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4048 || (pCtx->fs.Attr.n.u1Granularity));
4049 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4050 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4051 }
4052 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4053 {
4054 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4055 Assert(pCtx->gs.Attr.n.u1Present);
4056 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4057 Assert(!(pCtx->gs.Attr.u & 0xf00));
4058 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4059 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4060 || !(pCtx->gs.Attr.n.u1Granularity));
4061 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4062 || (pCtx->gs.Attr.n.u1Granularity));
4063 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4064 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4065 }
4066 /* 64-bit capable CPUs. */
4067 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4068 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
4069 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
4070 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
4071 }
4072 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4073 || ( CPUMIsGuestInRealModeEx(pCtx)
4074 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
4075 {
4076 /* Real and v86 mode checks. */
 4077 /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want to check what we're actually feeding to VT-x. */
4078 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4079#ifdef IN_RING0
4080 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
4081 {
4082 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
4083 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4084 }
4085 else
4086#endif
4087 {
4088 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4089 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4090 }
4091
4092 /* CS */
4093 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4094 Assert(pCtx->cs.u32Limit == 0xffff);
4095 Assert(u32CSAttr == 0xf3);
4096 /* SS */
4097 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4098 Assert(pCtx->ss.u32Limit == 0xffff);
4099 Assert(u32SSAttr == 0xf3);
4100 /* DS */
4101 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4102 Assert(pCtx->ds.u32Limit == 0xffff);
4103 Assert(u32DSAttr == 0xf3);
4104 /* ES */
4105 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4106 Assert(pCtx->es.u32Limit == 0xffff);
4107 Assert(u32ESAttr == 0xf3);
4108 /* FS */
4109 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4110 Assert(pCtx->fs.u32Limit == 0xffff);
4111 Assert(u32FSAttr == 0xf3);
4112 /* GS */
4113 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4114 Assert(pCtx->gs.u32Limit == 0xffff);
4115 Assert(u32GSAttr == 0xf3);
4116 /* 64-bit capable CPUs. */
4117 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4118 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
4119 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
4120 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
4121 }
4122}
4123#endif /* VBOX_STRICT */
4124
4125
4126/**
4127 * Exports a guest segment register into the guest-state area in the VMCS.
4128 *
4129 * @returns VBox status code.
4130 * @param pVCpu The cross context virtual CPU structure.
4131 * @param pVmcsInfo The VMCS info. object.
4132 * @param iSegReg The segment register number (X86_SREG_XXX).
4133 * @param pSelReg Pointer to the segment selector.
4134 *
4135 * @remarks No-long-jump zone!!!
4136 */
4137static int vmxHCExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
4138{
4139 Assert(iSegReg < X86_SREG_COUNT);
4140
4141 uint32_t u32Access = pSelReg->Attr.u;
4142#ifdef IN_RING0
4143 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
4144#endif
4145 {
4146 /*
 4147 * The way to differentiate whether this is really a null selector or just a selector
 4148 * loaded with 0 in real-mode is by using the segment attributes. A selector
 4149 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
 4150 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
4151 * NULL selectors loaded in protected-mode have their attribute as 0.
4152 */
4153 if (u32Access)
4154 { }
4155 else
4156 u32Access = X86DESCATTR_UNUSABLE;
4157 }
4158#ifdef IN_RING0
4159 else
4160 {
4161 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4162 u32Access = 0xf3;
4163 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4164 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4165 RT_NOREF_PV(pVCpu);
4166 }
4167#else
4168 RT_NOREF(pVmcsInfo);
4169#endif
4170
4171 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4172 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
 4173 ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
4174
4175 /*
4176 * Commit it to the VMCS.
4177 */
4178 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
4179 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
4180 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
4181 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
4182 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
4183 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
4184 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
4185 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
4186 return VINF_SUCCESS;
4187}
4188
4189
4190/**
4191 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
4192 * area in the VMCS.
4193 *
4194 * @returns VBox status code.
4195 * @param pVCpu The cross context virtual CPU structure.
4196 * @param pVmxTransient The VMX-transient structure.
4197 *
4198 * @remarks Will import guest CR0 on strict builds during validation of
4199 * segments.
4200 * @remarks No-long-jump zone!!!
4201 */
4202static int vmxHCExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
4203{
4204 int rc = VERR_INTERNAL_ERROR_5;
4205#ifdef IN_RING0
4206 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4207#endif
4208 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4209 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4210#ifdef IN_RING0
4211 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
4212#endif
4213
4214 /*
4215 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4216 */
4217 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
4218 {
4219 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
4220 {
4221 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
4222#ifdef IN_RING0
4223 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
4224 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
4225#endif
4226 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
4227 AssertRC(rc);
4228 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
4229 }
4230
4231 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
4232 {
4233 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
4234#ifdef IN_RING0
4235 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
4236 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
4237#endif
4238 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
4239 AssertRC(rc);
4240 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
4241 }
4242
4243 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
4244 {
4245 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
4246#ifdef IN_RING0
4247 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
4248 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
4249#endif
4250 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
4251 AssertRC(rc);
4252 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
4253 }
4254
4255 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
4256 {
4257 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
4258#ifdef IN_RING0
4259 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
4260 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
4261#endif
4262 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
4263 AssertRC(rc);
4264 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
4265 }
4266
4267 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
4268 {
4269 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
4270#ifdef IN_RING0
4271 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
4272 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
4273#endif
4274 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
4275 AssertRC(rc);
4276 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
4277 }
4278
4279 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
4280 {
4281 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
4282#ifdef IN_RING0
4283 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
4284 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
4285#endif
4286 rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
4287 AssertRC(rc);
4288 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
4289 }
4290
4291#ifdef VBOX_STRICT
4292 vmxHCValidateSegmentRegs(pVCpu, pVmcsInfo);
4293#endif
4294 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
4295 pCtx->cs.Attr.u));
4296 }
4297
4298 /*
4299 * Guest TR.
4300 */
4301 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
4302 {
4303 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
4304
4305 /*
4306 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
4307 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
4308 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4309 */
4310 uint16_t u16Sel;
4311 uint32_t u32Limit;
4312 uint64_t u64Base;
4313 uint32_t u32AccessRights;
4314#ifdef IN_RING0
4315 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
4316#endif
4317 {
4318 u16Sel = pCtx->tr.Sel;
4319 u32Limit = pCtx->tr.u32Limit;
4320 u64Base = pCtx->tr.u64Base;
4321 u32AccessRights = pCtx->tr.Attr.u;
4322 }
4323#ifdef IN_RING0
4324 else
4325 {
4326 Assert(!pVmxTransient->fIsNestedGuest);
4327 Assert(pVM->hm.s.vmx.pRealModeTSS);
4328 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
4329
4330 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4331 RTGCPHYS GCPhys;
4332 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4333 AssertRCReturn(rc, rc);
4334
4335 X86DESCATTR DescAttr;
4336 DescAttr.u = 0;
4337 DescAttr.n.u1Present = 1;
4338 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4339
4340 u16Sel = 0;
4341 u32Limit = HM_VTX_TSS_SIZE;
4342 u64Base = GCPhys;
4343 u32AccessRights = DescAttr.u;
4344 }
4345#endif
4346
4347 /* Validate. */
4348 Assert(!(u16Sel & RT_BIT(2)));
4349 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4350 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4351 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4352 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4353 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4354 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4355 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4356 Assert( (u32Limit & 0xfff) == 0xfff
4357 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4358 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
4359 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4360
4361 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
4362 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
4363 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
4364 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
4365
4366 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
4367 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
4368 }
4369
4370 /*
4371 * Guest GDTR.
4372 */
4373 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
4374 {
4375 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
4376
4377 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
4378 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
4379
4380 /* Validate. */
4381 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4382
4383 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
4384 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
4385 }
4386
4387 /*
4388 * Guest LDTR.
4389 */
4390 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
4391 {
4392 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
4393
4394 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4395 uint32_t u32Access;
4396 if ( !pVmxTransient->fIsNestedGuest
4397 && !pCtx->ldtr.Attr.u)
4398 u32Access = X86DESCATTR_UNUSABLE;
4399 else
4400 u32Access = pCtx->ldtr.Attr.u;
4401
4402 rc = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
4403 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
4404 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
4405 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
4406
4407 /* Validate. */
4408 if (!(u32Access & X86DESCATTR_UNUSABLE))
4409 {
4410 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4411 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4412 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4413 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4414 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4415 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4416 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
4417 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4418 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
4419 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4420 }
4421
4422 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
4423 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
4424 }
4425
4426 /*
4427 * Guest IDTR.
4428 */
4429 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
4430 {
4431 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
4432
4433 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
4434 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
4435
4436 /* Validate. */
4437 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4438
4439 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
4440 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
4441 }
4442
4443 return VINF_SUCCESS;
4444}
4445
4446
4447/**
4448 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4449 * areas.
4450 *
4451 * These MSRs will automatically be loaded to the host CPU on every successful
4452 * VM-entry and stored from the host CPU on every successful VM-exit.
4453 *
4454 * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area. The
4455 * actual host MSR values are not updated here for performance reasons. See
4456 * vmxHCExportHostMsrs().
4457 *
4458 * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
4459 *
4460 * @returns VBox status code.
4461 * @param pVCpu The cross context virtual CPU structure.
4462 * @param pVmxTransient The VMX-transient structure.
4463 *
4464 * @remarks No-long-jump zone!!!
4465 */
4466static int vmxHCExportGuestMsrs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
4467{
4468 AssertPtr(pVCpu);
4469 AssertPtr(pVmxTransient);
4470
4471 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4472 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4473
4474 /*
4475     * MSRs for which we use the auto-load/store MSR area in the VMCS.
4476 * For 64-bit hosts, we load/restore them lazily, see vmxHCLazyLoadGuestMsrs(),
4477 * nothing to do here. The host MSR values are updated when it's safe in
4478 * vmxHCLazySaveHostMsrs().
4479 *
4480     * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
4481 * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
4482 * emulation. The merged MSR permission bitmap will ensure that we get VM-exits
4483     * for any MSRs that are not part of the lazy MSRs, so we do not need to place
4484 * those MSRs into the auto-load/store MSR area. Nothing to do here.
4485 */
4486 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
4487 {
4488 /* No auto-load/store MSRs currently. */
4489 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4490 }
4491
4492 /*
4493 * Guest Sysenter MSRs.
4494 */
4495 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
4496 {
4497 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4498
4499 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
4500 {
4501 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
4502 AssertRC(rc);
4503 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4504 }
4505
4506 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
4507 {
4508 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
4509 AssertRC(rc);
4510 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4511 }
4512
4513 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
4514 {
4515 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
4516 AssertRC(rc);
4517 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4518 }
4519 }
4520
4521 /*
4522 * Guest/host EFER MSR.
4523 */
4524 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
4525 {
4526 /* Whether we are using the VMCS to swap the EFER MSR must have been
4527 determined earlier while exporting VM-entry/VM-exit controls. */
4528 Assert(!(ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
4529 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
4530
4531 if (vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
4532 {
4533 /*
4534 * EFER.LME is written by software, while EFER.LMA is set by the CPU to (CR0.PG & EFER.LME).
4535 * This means a guest can set EFER.LME=1 while CR0.PG=0 and EFER.LMA can remain 0.
4536 * VT-x requires that "IA-32e mode guest" VM-entry control must be identical to EFER.LMA
4537 * and to CR0.PG. Without unrestricted execution, CR0.PG (used for VT-x, not the shadow)
4538 * must always be 1. This forces us to effectively clear both EFER.LMA and EFER.LME until
4539 * the guest has also set CR0.PG=1. Otherwise, we would run into an invalid-guest state
4540 * during VM-entry.
4541 */
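             /* A concrete sketch of the above (illustrative values only): a non-unrestricted
                guest that has written EFER=0x100 (LME=1) while CR0.PG=0 still has LMA=0, so
                we export EFER with LME masked off; once the guest also sets CR0.PG=1, LMA
                becomes 1 and we export LME|LMA (0x500) as-is. */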
4542 uint64_t uGuestEferMsr = pCtx->msrEFER;
4543 if (!VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
4544 {
4545 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
4546 uGuestEferMsr &= ~MSR_K6_EFER_LME;
4547 else
4548 Assert((pCtx->msrEFER & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
4549 }
4550
4551 /*
4552 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4553     * but to use the auto-load/store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4554 */
4555 if (g_fHmVmxSupportsVmcsEfer)
4556 {
4557 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, uGuestEferMsr);
4558 AssertRC(rc);
4559 }
4560 else
4561 {
4562 /*
4563 * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
4564 * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
4565 */
4566 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, uGuestEferMsr,
4567 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
4568 AssertRCReturn(rc, rc);
4569 }
4570
4571 Log4Func(("efer=%#RX64 shadow=%#RX64\n", uGuestEferMsr, pCtx->msrEFER));
4572 }
4573 else if (!g_fHmVmxSupportsVmcsEfer)
4574 vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
4575
4576 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
4577 }
4578
4579 /*
4580 * Other MSRs.
4581 */
4582 if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
4583 {
4584 /* Speculation Control (R/W). */
4585 HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS);
4586 if (pVM->cpum.ro.GuestFeatures.fIbrs)
4587 {
4588 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
4589 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
4590 AssertRCReturn(rc, rc);
4591 }
4592
4593#ifdef IN_RING0 /** @todo */
4594 /* Last Branch Record. */
4595 if (VM_IS_VMX_LBR(pVM))
4596 {
4597 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
4598 uint32_t const idFromIpMsrStart = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
4599 uint32_t const idToIpMsrStart = pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
4600 uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
4601 Assert(cLbrStack <= 32);
4602 for (uint32_t i = 0; i < cLbrStack; i++)
4603 {
4604 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idFromIpMsrStart + i,
4605 pVmcsInfoShared->au64LbrFromIpMsr[i],
4606 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
4607 AssertRCReturn(rc, rc);
4608
4609 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
4610 if (idToIpMsrStart != 0)
4611 {
4612 rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idToIpMsrStart + i,
4613 pVmcsInfoShared->au64LbrToIpMsr[i],
4614 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
4615 AssertRCReturn(rc, rc);
4616 }
4617 }
4618
4619 /* Add LBR top-of-stack MSR (which contains the index to the most recent record). */
4620 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, pVM->hmr0.s.vmx.idLbrTosMsr,
4621 pVmcsInfoShared->u64LbrTosMsr, false /* fSetReadWrite */,
4622 false /* fUpdateHostMsr */);
4623 AssertRCReturn(rc, rc);
4624 }
4625#endif /* IN_RING0 */
4626
4627 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
4628 }
4629
4630 return VINF_SUCCESS;
4631}
4632
4633
4634#ifdef IN_RING0
4635/**
4636 * Sets up the usage of TSC-offsetting and updates the VMCS.
4637 *
4638 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
4639 * VMX-preemption timer.
4640 *
4641 * @returns VBox status code.
4642 * @param pVCpu The cross context virtual CPU structure.
4643 * @param pVmxTransient The VMX-transient structure.
4644 * @param idCurrentCpu The current CPU number.
4645 *
4646 * @remarks No-long-jump zone!!!
4647 */
4648static void vmxHCUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, RTCPUID idCurrentCpu)
4649{
4650 bool fOffsettedTsc;
4651 bool fParavirtTsc;
4652 uint64_t uTscOffset;
4653 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4654 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
4655
4656 if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
4657 {
4658         /* The TMCpuTickGetDeadlineAndTscOffset function is expensive (calling it on
4659            every entry slowed down the bs2-test1 CPUID testcase by ~33% on a 10980xe). */
4660 uint64_t cTicksToDeadline;
4661 if ( idCurrentCpu == pVCpu->hmr0.s.idLastCpu
4662 && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
4663 {
4664 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionReusingDeadline);
4665 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
4666 cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
4667 if ((int64_t)cTicksToDeadline > 0)
4668 { /* hopefully */ }
4669 else
4670 {
4671 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionReusingDeadlineExpired);
4672 cTicksToDeadline = 0;
4673 }
4674 }
4675 else
4676 {
4677 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionRecalcingDeadline);
4678 cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
4679 &pVCpu->hmr0.s.vmx.uTscDeadline,
4680 &pVCpu->hmr0.s.vmx.uTscDeadlineVersion);
4681 pVCpu->hmr0.s.vmx.uTscDeadline += cTicksToDeadline;
4682 if (cTicksToDeadline >= 128)
4683 { /* hopefully */ }
4684 else
4685 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionRecalcingDeadlineExpired);
4686 }
4687
4688 /* Make sure the returned values have sane upper and lower boundaries. */
4689 uint64_t const u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
4690 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second, 15.625ms. */ /** @todo r=bird: Once real+virtual timers move to separate thread, we can raise the upper limit (16ms isn't much). ASSUMES working poke cpu function. */
4691 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 32678); /* 1/32768th of a second, ~30us. */
4692 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
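        /* Rough numbers for perspective (a sketch, assuming a 3 GHz TSC): the upper clamp is
           ~46.9M ticks (~15.6ms) and the lower clamp ~92k ticks (~30us); the shift then scales
           the tick count down by 2^cPreemptTimerShift to preemption-timer units. */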
4693
4694 /** @todo r=ramshankar: We need to find a way to integrate nested-guest
4695 * preemption timers here. We probably need to clamp the preemption timer,
4696 * after converting the timer value to the host. */
4697 uint32_t const cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4698 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
4699 AssertRC(rc);
4700 }
4701 else
4702 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
4703
4704 if (fParavirtTsc)
4705 {
4706         /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
4707            information before every VM-entry, hence we disable it for performance's sake. */
4708#if 0
4709 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
4710 AssertRC(rc);
4711#endif
4712 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatTscParavirt);
4713 }
4714
4715 if ( fOffsettedTsc
4716 && RT_LIKELY(!pVCpu->hmr0.s.fDebugWantRdTscExit))
4717 {
4718 if (pVmxTransient->fIsNestedGuest)
4719 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
4720 vmxHCSetTscOffsetVmcs(pVCpu, pVmcsInfo, uTscOffset);
4721 vmxHCRemoveProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
4722 }
4723 else
4724 {
4725 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4726 vmxHCSetProcCtlsVmcs(pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
4727 }
4728}
4729#endif /* IN_RING0 */
4730
4731
4732/**
4733 * Gets the IEM exception flags for the specified vector and IDT vectoring /
4734 * VM-exit interruption info type.
4735 *
4736 * @returns The IEM exception flags.
4737 * @param uVector The event vector.
4738 * @param uVmxEventType The VMX event type.
4739 *
4740 * @remarks This function currently only constructs flags required for
4741 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
4742 * and CR2 aspects of an exception are not included).
4743 */
4744static uint32_t vmxHCGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
4745{
4746 uint32_t fIemXcptFlags;
4747 switch (uVmxEventType)
4748 {
4749 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
4750 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
4751 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
4752 break;
4753
4754 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
4755 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
4756 break;
4757
4758 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
4759 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
4760 break;
4761
4762 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
4763 {
4764 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
4765 if (uVector == X86_XCPT_BP)
4766 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
4767 else if (uVector == X86_XCPT_OF)
4768 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
4769 else
4770 {
4771 fIemXcptFlags = 0;
4772 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
4773 }
4774 break;
4775 }
4776
4777 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
4778 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
4779 break;
4780
4781 default:
4782 fIemXcptFlags = 0;
4783 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
4784 break;
4785 }
4786 return fIemXcptFlags;
4787}
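
/* Typical use (a sketch): a VM-exit handler converts both the IDT-vectoring info and the
   VM-exit interruption info with this function and hands the two flag sets to IEM
   (IEMEvaluateRecursiveXcpt) to decide whether the second event is delivered normally,
   turns into a double fault or escalates to a triple fault. */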
4788
4789
4790/**
4791 * Sets an event as a pending event to be injected into the guest.
4792 *
4793 * @param pVCpu The cross context virtual CPU structure.
4794 * @param u32IntInfo The VM-entry interruption-information field.
4795 * @param cbInstr The VM-entry instruction length in bytes (for
4796 * software interrupts, exceptions and privileged
4797 * software exceptions).
4798 * @param u32ErrCode The VM-entry exception error code.
4799 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4800 * page-fault.
4801 */
4802DECLINLINE(void) vmxHCSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4803 RTGCUINTPTR GCPtrFaultAddress)
4804{
4805 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
4806 VCPU_2_VMXSTATE(pVCpu).Event.fPending = true;
4807 VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo = u32IntInfo;
4808 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode = u32ErrCode;
4809 VCPU_2_VMXSTATE(pVCpu).Event.cbInstr = cbInstr;
4810 VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
4811}
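
/* Example (a sketch, mirroring the helpers below): queueing a guest #PF with an error code
   and fault address could look like
       uint32_t const uIntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
       vmxHCSetPendingEvent(pVCpu, uIntInfo, 0, uErrCode, GCPtrFaultAddress);
   with uErrCode and GCPtrFaultAddress taken from the VM-exit interruption error code and
   the exit qualification respectively. */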
4812
4813
4814/**
4815 * Sets an external interrupt as pending-for-injection into the VM.
4816 *
4817 * @param pVCpu The cross context virtual CPU structure.
4818 * @param u8Interrupt The external interrupt vector.
4819 */
4820DECLINLINE(void) vmxHCSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
4821{
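    /* Note: the VM-exit and VM-entry interruption-information fields use the same bit layout
       for the vector, type, error-code-valid and valid bits, so mixing the
       VMX_BF_EXIT_INT_INFO_* and VMX_BF_ENTRY_INT_INFO_* field macros below is harmless. */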
4822 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
4823 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
4824 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4825 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4826 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4827}
4828
4829
4830/**
4831 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
4832 *
4833 * @param pVCpu The cross context virtual CPU structure.
4834 */
4835DECLINLINE(void) vmxHCSetPendingXcptNmi(PVMCPUCC pVCpu)
4836{
4837 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
4838 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
4839 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4840 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4841 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4842}
4843
4844
4845/**
4846 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
4847 *
4848 * @param pVCpu The cross context virtual CPU structure.
4849 */
4850DECLINLINE(void) vmxHCSetPendingXcptDF(PVMCPUCC pVCpu)
4851{
4852 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
4853 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
4854 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
4855 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4856 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4857}
4858
4859
4860/**
4861 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
4862 *
4863 * @param pVCpu The cross context virtual CPU structure.
4864 */
4865DECLINLINE(void) vmxHCSetPendingXcptUD(PVMCPUCC pVCpu)
4866{
4867 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
4868 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
4869 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4870 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4871 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4872}
4873
4874
4875/**
4876 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
4877 *
4878 * @param pVCpu The cross context virtual CPU structure.
4879 */
4880DECLINLINE(void) vmxHCSetPendingXcptDB(PVMCPUCC pVCpu)
4881{
4882 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
4883 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
4884 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
4885 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4886 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4887}
4888
4889
4890#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4891/**
4892 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
4893 *
4894 * @param pVCpu The cross context virtual CPU structure.
4895 * @param u32ErrCode The error code for the general-protection exception.
4896 */
4897DECLINLINE(void) vmxHCSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
4898{
4899 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
4900 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
4901 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
4902 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4903 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
4904}
4905
4906
4907/**
4908 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
4909 *
4910 * @param pVCpu The cross context virtual CPU structure.
4911 * @param u32ErrCode The error code for the stack exception.
4912 */
4913DECLINLINE(void) vmxHCSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
4914{
4915 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
4916 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
4917 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
4918 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
4919 vmxHCSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
4920}
4921#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
4922
4923
4924/**
4925 * Fixes up attributes for the specified segment register.
4926 *
4927 * @param pVCpu The cross context virtual CPU structure.
4928 * @param pSelReg The segment register that needs fixing.
4929 * @param pszRegName The register name (for logging and assertions).
4930 */
4931static void vmxHCFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
4932{
4933 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
4934
4935 /*
4936 * If VT-x marks the segment as unusable, most other bits remain undefined:
4937 * - For CS the L, D and G bits have meaning.
4938 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
4939 * - For the remaining data segments no bits are defined.
4940 *
4941  * The present bit and the unusable bit have been observed to be set at the
4942 * same time (the selector was supposed to be invalid as we started executing
4943 * a V8086 interrupt in ring-0).
4944 *
4945 * What should be important for the rest of the VBox code, is that the P bit is
4946 * cleared. Some of the other VBox code recognizes the unusable bit, but
4947  * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
4948 * safe side here, we'll strip off P and other bits we don't care about. If
4949 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
4950 *
4951 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
4952 */
4953#ifdef VBOX_STRICT
4954 uint32_t const uAttr = pSelReg->Attr.u;
4955#endif
4956
4957 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
4958 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
4959 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
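    /* For instance (a sketch): an unusable but present data segment with Attr.u = 0x1c093
       (G|D|P|S, type 3) would be reduced to 0x1c013 here, i.e. the P bit is stripped while
       the unusable, G, D, S and type bits survive. */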
4960
4961#ifdef VBOX_STRICT
4962# ifdef IN_RING0
4963 VMMRZCallRing3Disable(pVCpu);
4964# endif
4965 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
4966# ifdef DEBUG_bird
4967 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
4968 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
4969 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
4970# endif
4971# ifdef IN_RING0
4972 VMMRZCallRing3Enable(pVCpu);
4973# endif
4974 NOREF(uAttr);
4975#endif
4976 RT_NOREF2(pVCpu, pszRegName);
4977}
4978
4979
4980/**
4981 * Imports a guest segment register from the current VMCS into the guest-CPU
4982 * context.
4983 *
4984 * @param pVCpu The cross context virtual CPU structure.
4985 * @param iSegReg The segment register number (X86_SREG_XXX).
4986 *
4987 * @remarks Called with interrupts and/or preemption disabled.
4988 */
4989static void vmxHCImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
4990{
4991 Assert(iSegReg < X86_SREG_COUNT);
4992 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
4993 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
4994 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
4995 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
4996
4997 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
4998
4999 uint16_t u16Sel;
5000 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
5001 pSelReg->Sel = u16Sel;
5002 pSelReg->ValidSel = u16Sel;
5003
5004 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
5005 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
5006
5007 uint32_t u32Attr;
5008 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
5009 pSelReg->Attr.u = u32Attr;
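    /* Note: the string literal below packs the six register names back to back ("ES\0CS\0..."),
       each taking two characters plus a NUL terminator, so iSegReg * 3 points at the name
       matching the segment being imported (used for logging/assertions only). */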
5010 if (u32Attr & X86DESCATTR_UNUSABLE)
5011 vmxHCFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
5012
5013 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5014}
5015
5016
5017/**
5018 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
5019 *
5020 * @param pVCpu The cross context virtual CPU structure.
5021 *
5022 * @remarks Called with interrupts and/or preemption disabled.
5023 */
5024static void vmxHCImportGuestLdtr(PVMCPUCC pVCpu)
5025{
5026 uint16_t u16Sel;
5027 uint64_t u64Base;
5028 uint32_t u32Limit, u32Attr;
5029 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
5030 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
5031 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
5032 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
5033
5034 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
5035 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
5036 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5037 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
5038 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5039 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
5040 if (u32Attr & X86DESCATTR_UNUSABLE)
5041 vmxHCFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
5042}
5043
5044
5045/**
5046 * Imports the guest TR from the current VMCS into the guest-CPU context.
5047 *
5048 * @param pVCpu The cross context virtual CPU structure.
5049 *
5050 * @remarks Called with interrupts and/or preemption disabled.
5051 */
5052static void vmxHCImportGuestTr(PVMCPUCC pVCpu)
5053{
5054 uint16_t u16Sel;
5055 uint64_t u64Base;
5056 uint32_t u32Limit, u32Attr;
5057 int rc = VMX_VMCS_READ_16(pVCpu, VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
5058 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
5059 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
5060 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
5061
5062 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
5063 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
5064 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5065 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
5066 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5067 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
5068 /* TR is the only selector that can never be unusable. */
5069 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
5070}
5071
5072
5073/**
5074 * Imports the guest RIP from the VMCS back into the guest-CPU context.
5075 *
5076 * @param pVCpu The cross context virtual CPU structure.
5077 *
5078 * @remarks Called with interrupts and/or preemption disabled, should not assert!
5079 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
5080 * instead!!!
5081 */
5082static void vmxHCImportGuestRip(PVMCPUCC pVCpu)
5083{
5084 uint64_t u64Val;
5085 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5086 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
5087 {
5088 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
5089 AssertRC(rc);
5090
5091 pCtx->rip = u64Val;
5092 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
5093 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
5094 }
5095}
5096
5097
5098/**
5099 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
5100 *
5101 * @param pVCpu The cross context virtual CPU structure.
5102 * @param pVmcsInfo The VMCS info. object.
5103 *
5104 * @remarks Called with interrupts and/or preemption disabled, should not assert!
5105 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
5106 * instead!!!
5107 */
5108static void vmxHCImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5109{
5110 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5111 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
5112 {
5113 uint64_t u64Val;
5114 int rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
5115 AssertRC(rc);
5116
5117 pCtx->rflags.u64 = u64Val;
5118#ifdef IN_RING0
5119 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
5120 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
5121 {
5122 pCtx->eflags.Bits.u1VM = 0;
5123 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
5124 }
5125#else
5126 RT_NOREF(pVmcsInfo);
5127#endif
5128 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
5129 }
5130}
5131
5132
5133/**
5134 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
5135 * context.
5136 *
5137 * @param pVCpu The cross context virtual CPU structure.
5138 * @param pVmcsInfo The VMCS info. object.
5139 *
5140 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
5141 * do not log!
5142 * @remarks Do -not- call this function directly, use vmxHCImportGuestState()
5143 * instead!!!
5144 */
5145static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
5146{
5147 uint32_t u32Val;
5148 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
5149 if (!u32Val)
5150 {
5151 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
5152 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5153 CPUMSetGuestNmiBlocking(pVCpu, false);
5154 }
5155 else
5156 {
5157 /*
5158 * We must import RIP here to set our EM interrupt-inhibited state.
5159 * We also import RFLAGS as our code that evaluates pending interrupts
5160 * before VM-entry requires it.
5161 */
5162 vmxHCImportGuestRip(pVCpu);
5163 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
5164
5165 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5166 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
5167 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
5168 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5169
5170 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
5171 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
5172 }
5173}
5174
5175
5176/**
5177 * Worker for VMXR0ImportStateOnDemand.
5178 *
5179 * @returns VBox status code.
5180 * @param pVCpu The cross context virtual CPU structure.
5181 * @param pVmcsInfo The VMCS info. object.
5182 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
5183 */
5184static int vmxHCImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
5185{
5186 int rc = VINF_SUCCESS;
5187 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5188 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5189 uint32_t u32Val;
5190
5191 /*
5192     * Note! This is a hack to work around a mysterious BSOD observed with release builds
5193 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
5194 * neither are other host platforms.
5195 *
5196 * Committing this temporarily as it prevents BSOD.
5197 *
5198 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
5199 */
5200# ifdef RT_OS_WINDOWS
5201 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
5202 return VERR_HM_IPE_1;
5203# endif
5204
5205 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
5206
5207#ifdef IN_RING0
5208 /*
5209 * We disable interrupts to make the updating of the state and in particular
5210      * the fExtrn modification atomic with respect to preemption hooks.
5211 */
5212 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
5213#endif
5214
5215 fWhat &= pCtx->fExtrn;
5216 if (fWhat)
5217 {
5218 do
5219 {
5220 if (fWhat & CPUMCTX_EXTRN_RIP)
5221 vmxHCImportGuestRip(pVCpu);
5222
5223 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
5224 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
5225
5226 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
5227 vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
5228
5229 if (fWhat & CPUMCTX_EXTRN_RSP)
5230 {
5231 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RSP, &pCtx->rsp);
5232 AssertRC(rc);
5233 }
5234
5235 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
5236 {
5237 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
5238#ifndef IN_NEM_DARWIN
5239 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
5240#else
5241 bool const fRealOnV86Active = false; /* HV supports only unrestricted guest execution. */
5242#endif
5243 if (fWhat & CPUMCTX_EXTRN_CS)
5244 {
5245 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
5246 vmxHCImportGuestRip(pVCpu);
5247 if (fRealOnV86Active)
5248 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
5249 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
5250 }
5251 if (fWhat & CPUMCTX_EXTRN_SS)
5252 {
5253 vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
5254 if (fRealOnV86Active)
5255 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
5256 }
5257 if (fWhat & CPUMCTX_EXTRN_DS)
5258 {
5259 vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
5260 if (fRealOnV86Active)
5261 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
5262 }
5263 if (fWhat & CPUMCTX_EXTRN_ES)
5264 {
5265 vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
5266 if (fRealOnV86Active)
5267 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
5268 }
5269 if (fWhat & CPUMCTX_EXTRN_FS)
5270 {
5271 vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
5272 if (fRealOnV86Active)
5273 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
5274 }
5275 if (fWhat & CPUMCTX_EXTRN_GS)
5276 {
5277 vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
5278 if (fRealOnV86Active)
5279 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
5280 }
5281 }
5282
5283 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
5284 {
5285 if (fWhat & CPUMCTX_EXTRN_LDTR)
5286 vmxHCImportGuestLdtr(pVCpu);
5287
5288 if (fWhat & CPUMCTX_EXTRN_GDTR)
5289 {
5290 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
5291 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
5292 pCtx->gdtr.cbGdt = u32Val;
5293 }
5294
5295 /* Guest IDTR. */
5296 if (fWhat & CPUMCTX_EXTRN_IDTR)
5297 {
5298 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
5299 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
5300 pCtx->idtr.cbIdt = u32Val;
5301 }
5302
5303 /* Guest TR. */
5304 if (fWhat & CPUMCTX_EXTRN_TR)
5305 {
5306#ifdef IN_RING0
5307 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
5308                so we don't need to import that one. */
5309 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5310#endif
5311 vmxHCImportGuestTr(pVCpu);
5312 }
5313 }
5314
5315 if (fWhat & CPUMCTX_EXTRN_DR7)
5316 {
5317#ifdef IN_RING0
5318 if (!pVCpu->hmr0.s.fUsingHyperDR7)
5319#endif
5320 {
5321 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
5322 AssertRC(rc);
5323 }
5324 }
5325
5326 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
5327 {
5328 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
5329 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
5330 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
5331 pCtx->SysEnter.cs = u32Val;
5332 }
5333
5334#ifdef IN_RING0
5335 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
5336 {
5337 if ( pVM->hmr0.s.fAllow64BitGuests
5338 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
5339 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
5340 }
5341
5342 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
5343 {
5344 if ( pVM->hmr0.s.fAllow64BitGuests
5345 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
5346 {
5347 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
5348 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
5349 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
5350 }
5351 }
5352
5353 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
5354 {
5355 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
5356 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
5357 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
5358 Assert(pMsrs);
5359 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
5360 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
5361 for (uint32_t i = 0; i < cMsrs; i++)
5362 {
5363 uint32_t const idMsr = pMsrs[i].u32Msr;
5364 switch (idMsr)
5365 {
5366 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
5367 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
5368 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
5369 default:
5370 {
5371 uint32_t idxLbrMsr;
5372 if (VM_IS_VMX_LBR(pVM))
5373 {
5374 if (vmxHCIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
5375 {
5376 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
5377 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
5378 break;
5379 }
5380 if (vmxHCIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
5381 {
5382                                Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
5383 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
5384 break;
5385 }
5386 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
5387 {
5388 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
5389 break;
5390 }
5391 /* Fallthru (no break) */
5392 }
5393 pCtx->fExtrn = 0;
5394                            VCPU_2_VMXSTATE(pVCpu).u32HMError = idMsr; /* Report the offending MSR, not the first slot. */
5395 ASMSetFlags(fEFlags);
5396 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
5397 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5398 }
5399 }
5400 }
5401 }
5402#endif
5403
5404 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
5405 {
5406 if (fWhat & CPUMCTX_EXTRN_CR0)
5407 {
5408 uint64_t u64Cr0;
5409 uint64_t u64Shadow;
5410 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
5411 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
5412#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
5413 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
5414 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
5415#else
5416 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
5417 {
5418 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
5419 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
5420 }
5421 else
5422 {
5423 /*
5424 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
5425 * the nested-guest using hardware-assisted VMX. Accordingly we need to
5426 * re-construct CR0. See @bugref{9180#c95} for details.
5427 */
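                /* Put differently (a sketch of the merge): bits outside the merged guest/host
                   mask come from the hardware VMCS value, bits the nested hypervisor owns come
                   from the nested-guest VMCS guest CR0, and bits only we own come from the
                   read shadow. */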
5428 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
5429 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5430 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
5431 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
5432 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
5433 }
5434#endif
5435#ifdef IN_RING0
5436 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
5437#endif
5438 CPUMSetGuestCR0(pVCpu, u64Cr0);
5439#ifdef IN_RING0
5440 VMMRZCallRing3Enable(pVCpu);
5441#endif
5442 }
5443
5444 if (fWhat & CPUMCTX_EXTRN_CR4)
5445 {
5446 uint64_t u64Cr4;
5447 uint64_t u64Shadow;
5448 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
5449 rc |= VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
5450#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
5451 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
5452 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
5453#else
5454 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
5455 {
5456 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
5457 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
5458 }
5459 else
5460 {
5461 /*
5462 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
5463 * the nested-guest using hardware-assisted VMX. Accordingly we need to
5464 * re-construct CR4. See @bugref{9180#c95} for details.
5465 */
5466 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
5467 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5468 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
5469 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
5470 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
5471 }
5472#endif
5473 pCtx->cr4 = u64Cr4;
5474 }
5475
5476 if (fWhat & CPUMCTX_EXTRN_CR3)
5477 {
5478 /* CR0.PG bit changes are always intercepted, so it's up to date. */
5479 if ( VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
5480 || ( VM_IS_VMX_NESTED_PAGING(pVM)
5481 && CPUMIsGuestPagingEnabledEx(pCtx)))
5482 {
5483 uint64_t u64Cr3;
5484 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
5485 if (pCtx->cr3 != u64Cr3)
5486 {
5487 pCtx->cr3 = u64Cr3;
5488 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5489 }
5490
5491 /*
5492                  * If the guest is in PAE mode, sync back the PDPEs into the guest state.
5493 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
5494 */
5495 if (CPUMIsGuestInPAEModeEx(pCtx))
5496 {
5497 X86PDPE aPaePdpes[4];
5498 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
5499 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
5500 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
5501 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
5502 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
5503 {
5504 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
5505 /* PGM now updates PAE PDPTEs while updating CR3. */
5506 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5507 }
5508 }
5509 }
5510 }
5511 }
5512
5513#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5514 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
5515 {
5516 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5517 && !CPUMIsGuestInVmxNonRootMode(pCtx))
5518 {
5519 Assert(CPUMIsGuestInVmxRootMode(pCtx));
5520 rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
5521 if (RT_SUCCESS(rc))
5522 { /* likely */ }
5523 else
5524 break;
5525 }
5526 }
5527#endif
5528 } while (0);
5529
5530 if (RT_SUCCESS(rc))
5531 {
5532 /* Update fExtrn. */
5533 pCtx->fExtrn &= ~fWhat;
5534
5535 /* If everything has been imported, clear the HM keeper bit. */
5536 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
5537 {
5538#ifndef IN_NEM_DARWIN
5539 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
5540#else
5541 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_NEM;
5542#endif
5543 Assert(!pCtx->fExtrn);
5544 }
5545 }
5546 }
5547#ifdef IN_RING0
5548 else
5549 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
5550
5551 /*
5552 * Restore interrupts.
5553 */
5554 ASMSetFlags(fEFlags);
5555#endif
5556
5557 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
5558
5559 if (RT_SUCCESS(rc))
5560 { /* likely */ }
5561 else
5562 return rc;
5563
5564 /*
5565 * Honor any pending CR3 updates.
5566 *
5567 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
5568 * -> VMMRZCallRing3Disable() -> vmxHCImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5569 * -> continue with VM-exit handling -> vmxHCImportGuestState() and here we are.
5570 *
5571      * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
5572 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
5573 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
5574 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
5575 *
5576 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5577 *
5578 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
5579 */
5580 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
5581#ifdef IN_RING0
5582 && VMMRZCallRing3IsEnabled(pVCpu)
5583#endif
5584 )
5585 {
5586 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
5587 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5588 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5589 }
5590
5591 return VINF_SUCCESS;
5592}
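
/* Usage sketch: a VM-exit handler that only needs RIP and RFLAGS would typically request just
   those bits, e.g. vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS),
   leaving the rest of the context marked external (pCtx->fExtrn) until it is actually needed. */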
5593
5594
5595/**
5596 * Check per-VM and per-VCPU force flag actions that require us to go back to
5597 * ring-3 for one reason or another.
5598 *
5599 * @returns Strict VBox status code (i.e. informational status codes too)
5600 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5601 * ring-3.
5602 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5603 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5604 * interrupts)
5605 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5606 * all EMTs to be in ring-3.
5607  * @retval  VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5608 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
5609 * to the EM loop.
5610 *
5611 * @param pVCpu The cross context virtual CPU structure.
5612  * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
5613 * @param fStepping Whether we are single-stepping the guest using the
5614 * hypervisor debugger.
5615 *
5616 * @remarks This might cause nested-guest VM-exits, caller must check if the guest
5617 * is no longer in VMX non-root mode.
5618 */
5619static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
5620{
5621#ifdef IN_RING0
5622 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5623#endif
5624
5625 /*
5626 * Update pending interrupts into the APIC's IRR.
5627 */
5628 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
5629 APICUpdatePendingInterrupts(pVCpu);
5630
5631 /*
5632 * Anything pending? Should be more likely than not if we're doing a good job.
5633 */
5634 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5635 if ( !fStepping
5636 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
5637 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
5638 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
5639 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
5640 return VINF_SUCCESS;
5641
5642     /* Pending PGM CR3 sync. */
5643     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5644 {
5645 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5646 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
5647 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
5648 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5649 if (rcStrict != VINF_SUCCESS)
5650 {
5651 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
5652 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
5653 return rcStrict;
5654 }
5655 }
5656
5657 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5658 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
5659 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5660 {
5661 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHmToR3FF);
5662 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
5663 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5664 return rc;
5665 }
5666
5667 /* Pending VM request packets, such as hardware interrupts. */
5668 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
5669 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
5670 {
5671 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchVmReq);
5672 Log4Func(("Pending VM request forcing us back to ring-3\n"));
5673 return VINF_EM_PENDING_REQUEST;
5674 }
5675
5676 /* Pending PGM pool flushes. */
5677 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5678 {
5679 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchPgmPoolFlush);
5680 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
5681 return VINF_PGM_POOL_FLUSH_PENDING;
5682 }
5683
5684 /* Pending DMA requests. */
5685 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
5686 {
5687 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchDma);
5688 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
5689 return VINF_EM_RAW_TO_R3;
5690 }
5691
5692#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5693 /*
5694 * Pending nested-guest events.
5695 *
5696     * Please note that the priority of these events is specified and important.
5697 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
5698 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
5699 */
5700 if (fIsNestedGuest)
5701 {
5702 /* Pending nested-guest APIC-write. */
5703 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
5704 {
5705 Log4Func(("Pending nested-guest APIC-write\n"));
5706 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
5707 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5708 return rcStrict;
5709 }
5710
5711 /* Pending nested-guest monitor-trap flag (MTF). */
5712 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
5713 {
5714 Log4Func(("Pending nested-guest MTF\n"));
5715 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
5716 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5717 return rcStrict;
5718 }
5719
5720 /* Pending nested-guest VMX-preemption timer expired. */
5721 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
5722 {
5723 Log4Func(("Pending nested-guest preempt timer\n"));
5724 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
5725 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
5726 return rcStrict;
5727 }
5728 }
5729#else
5730 NOREF(fIsNestedGuest);
5731#endif
5732
5733 return VINF_SUCCESS;
5734}
5735
5736
5737/**
5738 * Converts any TRPM trap into a pending HM event. This is typically used when
5739 * entering from ring-3 (not longjmp returns).
5740 *
5741 * @param pVCpu The cross context virtual CPU structure.
5742 */
5743static void vmxHCTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
5744{
5745 Assert(TRPMHasTrap(pVCpu));
5746 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5747
5748 uint8_t uVector;
5749 TRPMEVENT enmTrpmEvent;
5750 uint32_t uErrCode;
5751 RTGCUINTPTR GCPtrFaultAddress;
5752 uint8_t cbInstr;
5753 bool fIcebp;
5754
5755 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
5756 AssertRC(rc);
5757
5758 uint32_t u32IntInfo;
5759 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
5760 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
5761
5762 rc = TRPMResetTrap(pVCpu);
5763 AssertRC(rc);
5764 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5765 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5766
5767 vmxHCSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5768}
5769
5770
5771/**
5772 * Converts the pending HM event into a TRPM trap.
5773 *
5774 * @param pVCpu The cross context virtual CPU structure.
5775 */
5776static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
5777{
5778 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
5779
5780 /* If a trap was already pending, we did something wrong! */
5781 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5782
5783 uint32_t const u32IntInfo = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
5784 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
5785 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
5786
5787 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5788
5789 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5790 AssertRC(rc);
5791
5792 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
5793 TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
5794
5795 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
5796 TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
5797 else
5798 {
5799 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
5800 switch (uVectorType)
5801 {
5802 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5803 TRPMSetTrapDueToIcebp(pVCpu);
5804 RT_FALL_THRU();
5805 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5806 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
5807 {
5808 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5809 || ( uVector == X86_XCPT_BP /* INT3 */
5810 || uVector == X86_XCPT_OF /* INTO */
5811 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
5812 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
5813 TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
5814 break;
5815 }
5816 }
5817 }
5818
5819 /* We're now done converting the pending event. */
5820 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
5821}
5822
5823
5824/**
5825 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
5826 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
5827 *
5828 * @param pVCpu The cross context virtual CPU structure.
5829 * @param pVmcsInfo The VMCS info. object.
5830 */
5831static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
5832{
5833 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
5834 {
5835 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
5836 {
5837 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
5838 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
5839 AssertRC(rc);
5840 }
5841     } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
5842}
5843
5844
5845/**
5846 * Clears the interrupt-window exiting control in the VMCS.
5847 *
5848 * @param pVCpu The cross context virtual CPU structure.
5849 * @param pVmcsInfo The VMCS info. object.
5850 */
5851DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
5852{
5853 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
5854 {
5855 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
5856 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
5857 AssertRC(rc);
5858 }
5859}
5860
5861
5862/**
5863 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
5864 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
5865 *
5866 * @param pVCpu The cross context virtual CPU structure.
5867 * @param pVmcsInfo The VMCS info. object.
5868 */
5869static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
5870{
5871 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
5872 {
5873 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
5874 {
5875 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
5876 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
5877 AssertRC(rc);
5878 Log4Func(("Setup NMI-window exiting\n"));
5879 }
5880 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
5881}
5882
5883
5884/**
5885 * Clears the NMI-window exiting control in the VMCS.
5886 *
5887 * @param pVCpu The cross context virtual CPU structure.
5888 * @param pVmcsInfo The VMCS info. object.
5889 */
5890DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
5891{
5892 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
5893 {
5894 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
5895 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
5896 AssertRC(rc);
5897 }
5898}
5899
5900
5901#ifdef IN_RING0
5902/**
5903 * Does the necessary state syncing before returning to ring-3 for any reason
5904 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
5905 *
5906 * @returns VBox status code.
5907 * @param pVCpu The cross context virtual CPU structure.
5908 * @param fImportState Whether to import the guest state from the VMCS back
5909 * to the guest-CPU context.
5910 *
5911 * @remarks No-long-jmp zone!!!
5912 */
5913static int vmxHCLeave(PVMCPUCC pVCpu, bool fImportState)
5914{
5915 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5916 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5917
5918 RTCPUID const idCpu = RTMpCpuId();
5919 Log4Func(("HostCpuId=%u\n", idCpu));
5920
5921 /*
5922 * !!! IMPORTANT !!!
5923 * If you modify code here, check whether VMXR0CallRing3Callback() needs to be updated too.
5924 */
5925
5926 /* Save the guest state if necessary. */
5927 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
5928 if (fImportState)
5929 {
5930 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
5931 AssertRCReturn(rc, rc);
5932 }
5933
5934 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
5935 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
5936 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
5937
5938 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
5939#ifdef VBOX_STRICT
5940 if (CPUMIsHyperDebugStateActive(pVCpu))
5941 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
5942#endif
5943 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
5944 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5945 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5946
5947 /* Restore host-state bits that VT-x only restores partially. */
5948 if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
5949 {
5950 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags, idCpu));
5951 VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
5952 }
5953 pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
5954
5955 /* Restore the lazy host MSRs as we're leaving VT-x context. */
5956 if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
5957 {
5958 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
5959 if (!fImportState)
5960 {
5961 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
5962 AssertRCReturn(rc, rc);
5963 }
5964 vmxHCLazyRestoreHostMsrs(pVCpu);
5965 Assert(!pVCpu->hmr0.s.vmx.fLazyMsrs);
5966 }
5967 else
5968 pVCpu->hmr0.s.vmx.fLazyMsrs = 0;
5969
5970 /* Update auto-load/store host MSR values when we re-enter VT-x (as we could be on a different CPU). */
5971 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
5972
5973 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatEntry);
5974 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState);
5975 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState);
5976 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatPreExit);
5977 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling);
5978 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitIO);
5979 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx);
5980 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi);
5981 STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry);
5982 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchLongJmpToR3);
5983
5984 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
5985
5986 /** @todo This partially defeats the purpose of having preemption hooks.
5987 * The problem is that deregistering the hooks should be moved to a place that
5988 * lasts until the EMT is about to be destroyed, not be done every time we leave
5989 * HM context.
5990 */
5991 int rc = vmxHCClearVmcs(pVmcsInfo);
5992 AssertRCReturn(rc, rc);
5993
5994#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5995 /*
5996 * A valid shadow VMCS is made active as part of VM-entry. It is necessary to
5997 * clear a shadow VMCS before allowing that VMCS to become active on another
5998 * logical processor. We may or may not be importing guest state which clears
5999 * it, so cover for it here.
6000 *
6001 * See Intel spec. 24.11.1 "Software Use of Virtual-Machine Control Structures".
6002 */
6003 if ( pVmcsInfo->pvShadowVmcs
6004 && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
6005 {
6006 rc = vmxHCClearShadowVmcs(pVmcsInfo);
6007 AssertRCReturn(rc, rc);
6008 }
6009
6010 /*
6011 * Flag that we need to re-export the host state if we switch to this VMCS before
6012 * executing guest or nested-guest code.
6013 */
6014 pVmcsInfo->idHostCpuState = NIL_RTCPUID;
6015#endif
6016
6017 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
6018 NOREF(idCpu);
6019 return VINF_SUCCESS;
6020}
6021
6022
6023/**
6024 * Leaves the VT-x session.
6025 *
6026 * @returns VBox status code.
6027 * @param pVCpu The cross context virtual CPU structure.
6028 *
6029 * @remarks No-long-jmp zone!!!
6030 */
6031static int vmxHCLeaveSession(PVMCPUCC pVCpu)
6032{
6033 HM_DISABLE_PREEMPT(pVCpu);
6034 HMVMX_ASSERT_CPU_SAFE(pVCpu);
6035 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6036 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6037
6038 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
6039 and had already done it from VMXR0ThreadCtxCallback(). */
6040 if (!pVCpu->hmr0.s.fLeaveDone)
6041 {
6042 int rc2 = vmxHCLeave(pVCpu, true /* fImportState */);
6043 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
6044 pVCpu->hmr0.s.fLeaveDone = true;
6045 }
6046 Assert(!pVCpu->cpum.GstCtx.fExtrn);
6047
6048 /*
6049 * !!! IMPORTANT !!!
6050 * If you modify code here, make sure to check whether VMXR0CallRing3Callback() needs to be updated too.
6051 */
6052
6053 /* Deregister hook now that we've left HM context before re-enabling preemption. */
6054 /** @todo Deregistering here means we need to VMCLEAR always
6055 * (longjmp/exit-to-r3) in VT-x which is not efficient; eliminate the need
6056 * for calling VMMR0ThreadCtxHookDisable here! */
6057 VMMR0ThreadCtxHookDisable(pVCpu);
6058
6059 /* Leave HM context. This takes care of local init (term) and deregistering the longjmp-to-ring-3 callback. */
6060 int rc = HMR0LeaveCpu(pVCpu);
6061 HM_RESTORE_PREEMPT();
6062 return rc;
6063}
6064
6065
6066/**
6067 * Does the necessary state syncing before doing a longjmp to ring-3.
6068 *
6069 * @returns VBox status code.
6070 * @param pVCpu The cross context virtual CPU structure.
6071 *
6072 * @remarks No-long-jmp zone!!!
6073 */
6074DECLINLINE(int) vmxHCLongJmpToRing3(PVMCPUCC pVCpu)
6075{
6076 return vmxHCLeaveSession(pVCpu);
6077}
6078
6079
6080/**
6081 * Take necessary actions before going back to ring-3.
6082 *
6083 * An action requires us to go back to ring-3. This function does the necessary
6084 * steps before we can safely return to ring-3. This is not the same as longjmps
6085 * to ring-3, this is voluntary and prepares the guest so it may continue
6086 * executing outside HM (recompiler/IEM).
6087 *
6088 * @returns VBox status code.
6089 * @param pVCpu The cross context virtual CPU structure.
6090 * @param rcExit The reason for exiting to ring-3. Can be
6091 * VINF_VMM_UNKNOWN_RING3_CALL.
6092 */
6093static int vmxHCExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
6094{
6095 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6096
6097 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
6098 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
6099 {
6100 VMXGetCurrentVmcs(&VCPU_2_VMXSTATE(pVCpu).vmx.LastError.HCPhysCurrentVmcs);
6101 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32VmcsRev = *(uint32_t *)pVmcsInfo->pvVmcs;
6102 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
6103 /* LastError.idCurrentCpu was updated in vmxHCPreRunGuestCommitted(). */
6104 }
6105
6106 /* Please, no longjumps here (a logging flush could longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
6107 VMMRZCallRing3Disable(pVCpu);
6108 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
6109
6110 /*
6111 * Convert any pending HM events back to TRPM due to premature exits to ring-3.
6112 * We need to do this only on returns to ring-3 and not for longjmps to ring-3.
6113 *
6114 * This is because execution may continue from ring-3 and we would need to inject
6115 * the event from there (hence place it back in TRPM).
6116 */
6117 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
6118 {
6119 vmxHCPendingEventToTrpmTrap(pVCpu);
6120 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
6121
6122 /* Clear the events from the VMCS. */
6123 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
6124 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); AssertRC(rc);
6125 }
6126#ifdef VBOX_STRICT
6127 /*
6128 * We check for rcExit here since for errors like VERR_VMX_UNABLE_TO_START_VM (which are
6129 * fatal), we don't care about verifying duplicate injection of events. Errors like
6130 * VERR_EM_INTERPRET are converted to their VINF_* counterparts -prior- to calling this
6131 * function so those should and will be checked below.
6132 */
6133 else if (RT_SUCCESS(rcExit))
6134 {
6135 /*
6136 * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
6137 * This can be pretty hard to debug otherwise; interrupts might occasionally get
6138 * injected twice, see @bugref{9180#c42}.
6139 *
6140 * However, if the VM-entry failed, the VM-entry interruption-information field
6141 * would be left unmodified as the event would not have been injected into the
6142 * guest. In such cases, don't assert; we're not going to continue guest execution anyway.
6143 */
6144 uint32_t uExitReason;
6145 uint32_t uEntryIntInfo;
6146 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6147 rc |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
6148 AssertRC(rc);
6149 AssertMsg(VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason) || !VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo),
6150 ("uExitReason=%#RX32 uEntryIntInfo=%#RX32 rcExit=%d\n", uExitReason, uEntryIntInfo, VBOXSTRICTRC_VAL(rcExit)));
6151 }
6152#endif
6153
6154 /*
6155 * Clear the interrupt-window and NMI-window VMCS controls as we could have got
6156 * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
6157 * (e.g. TPR below threshold).
6158 */
6159 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
6160 {
6161 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
6162 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
6163 }
6164
6165 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending,
6166 and if we're injecting an event we should have a TRPM trap pending. */
6167 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
6168#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
6169 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
6170#endif
6171
6172 /* Save guest state and restore host state bits. */
6173 int rc = vmxHCLeaveSession(pVCpu);
6174 AssertRCReturn(rc, rc);
6175 STAM_COUNTER_DEC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchLongJmpToR3);
6176
6177 /* Thread-context hooks are unregistered at this point!!! */
6178 /* Ring-3 callback notifications are unregistered at this point!!! */
6179
6180 /* Sync recompiler state. */
6181 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
6182 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
6183 | CPUM_CHANGED_LDTR
6184 | CPUM_CHANGED_GDTR
6185 | CPUM_CHANGED_IDTR
6186 | CPUM_CHANGED_TR
6187 | CPUM_CHANGED_HIDDEN_SEL_REGS);
6188 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
6189 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
6190 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
6191
6192 Assert(!pVCpu->hmr0.s.fClearTrapFlag);
6193
6194 /* Update the exit-to-ring-3 reason. */
6195 VCPU_2_VMXSTATE(pVCpu).rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
6196
6197 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
6198 if ( rcExit != VINF_EM_RAW_INTERRUPT
6199 || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
6200 {
6201 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
6202 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6203 }
6204
6205 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchExitToR3);
6206 VMMRZCallRing3Enable(pVCpu);
6207 return rc;
6208}
6209
6210
6211/**
6212 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6213 * stack.
6214 *
6215 * @returns Strict VBox status code (i.e. informational status codes too).
6216 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6217 * @param pVCpu The cross context virtual CPU structure.
6218 * @param uValue The value to push to the guest stack.
6219 */
6220static VBOXSTRICTRC vmxHCRealModeGuestStackPush(PVMCPUCC pVCpu, uint16_t uValue)
6221{
6222 /*
6223 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6224 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6225 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6226 */
6227 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
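 /* Note: an SP of 1 means the 2-byte push below would straddle the 0xffff stack limit
    (segment wrap-around); we treat that as fatal and return VINF_EM_RESET, see the @retval
    note and the segment wraparound reference above. */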
6228 if (pCtx->sp == 1)
6229 return VINF_EM_RESET;
6230 pCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6231 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
6232 AssertRC(rc);
6233 return rc;
6234}
6235 #endif /* IN_RING0 */
6236
6237/**
6238 * Injects an event into the guest upon VM-entry by updating the relevant fields
6239 * in the VM-entry area in the VMCS.
6240 *
6241 * @returns Strict VBox status code (i.e. informational status codes too).
6242 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6243 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6244 *
6245 * @param pVCpu The cross context virtual CPU structure.
6246 * @param pVmcsInfo The VMCS info. object.
 * @param fIsNestedGuest Flag whether the event is injected into a nested guest.
6247 * @param pEvent The event being injected.
6248 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
6249 * will be updated if necessary. This cannot be NULL.
6250 * @param fStepping Whether we're single-stepping guest execution and should
6251 * return VINF_EM_DBG_STEPPED if the event is injected
6252 * directly (registers modified by us, not by hardware on
6253 * VM-entry).
6254 */
6255static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
6256 uint32_t *pfIntrState)
6257{
6258 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6259 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
6260 Assert(pfIntrState);
6261
6262#ifndef IN_RING0
6263 RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
6264#endif
6265
6266 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6267 uint32_t u32IntInfo = pEvent->u64IntInfo;
6268 uint32_t const u32ErrCode = pEvent->u32ErrCode;
6269 uint32_t const cbInstr = pEvent->cbInstr;
6270 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
6271 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
6272 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
6273
6274#ifdef VBOX_STRICT
6275 /*
6276 * Validate the error-code-valid bit for hardware exceptions.
6277 * No error codes for exceptions in real-mode.
6278 *
6279 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
6280 */
6281 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
6282 && !CPUMIsGuestInRealModeEx(pCtx))
6283 {
6284 switch (uVector)
6285 {
6286 case X86_XCPT_PF:
6287 case X86_XCPT_DF:
6288 case X86_XCPT_TS:
6289 case X86_XCPT_NP:
6290 case X86_XCPT_SS:
6291 case X86_XCPT_GP:
6292 case X86_XCPT_AC:
6293 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
6294 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
6295 RT_FALL_THRU();
6296 default:
6297 break;
6298 }
6299 }
6300
6301 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6302 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
6303 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
6304#endif
6305
6306 RT_NOREF(uVector);
6307 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
6308 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
6309 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
6310 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
6311 {
6312 Assert(uVector <= X86_XCPT_LAST);
6313 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
6314 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
6315 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedXcpts[uVector]);
6316 }
6317 else
6318 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
6319
6320 /*
6321 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
6322 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
6323 * interrupt handler in the (real-mode) guest.
6324 *
6325 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
6326 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6327 */
6328 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
6329 {
6330#ifdef IN_RING0
6331 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
6332#endif
6333 {
6334 /*
6335 * For CPUs with unrestricted guest execution enabled and with the guest
6336 * in real-mode, we must not set the deliver-error-code bit.
6337 *
6338 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6339 */
6340 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
6341 }
6342#ifdef IN_RING0
6343 else
6344 {
6345 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6346 Assert(PDMVmmDevHeapIsEnabled(pVM));
6347 Assert(pVM->hm.s.vmx.pRealModeTSS);
6348 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
6349
6350 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
6351 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
6352 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
6353 AssertRCReturn(rc2, rc2);
6354
6355 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6356 size_t const cbIdtEntry = sizeof(X86IDTR16);
6357 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
6358 {
6359 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6360 if (uVector == X86_XCPT_DF)
6361 return VINF_EM_RESET;
6362
6363 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
6364 No error codes for exceptions in real-mode. */
6365 if (uVector == X86_XCPT_GP)
6366 {
6367 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
6368 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
6369 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6370 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6371 HMEVENT EventXcptDf;
6372 RT_ZERO(EventXcptDf);
6373 EventXcptDf.u64IntInfo = uXcptDfInfo;
6374 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
6375 }
6376
6377 /*
6378 * If we're injecting an event with no valid IDT entry, inject a #GP.
6379 * No error codes for exceptions in real-mode.
6380 *
6381 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
6382 */
6383 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
6384 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
6385 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6386 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6387 HMEVENT EventXcptGp;
6388 RT_ZERO(EventXcptGp);
6389 EventXcptGp.u64IntInfo = uXcptGpInfo;
6390 return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
6391 }
6392
6393 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6394 uint16_t uGuestIp = pCtx->ip;
6395 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
6396 {
6397 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6398 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
6399 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
6400 }
6401 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
6402 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
6403
6404 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
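 /* A real-mode IVT entry (X86IDTR16) is 4 bytes: the 16-bit handler offset (offSel) followed
    by the 16-bit code segment selector (uSel); the IVT base is whatever idtr.pIdt holds. */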
6405 X86IDTR16 IdtEntry;
6406 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
6407 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
6408 AssertRCReturn(rc2, rc2);
6409
6410 /* Construct the stack frame for the interrupt/exception handler. */
6411 VBOXSTRICTRC rcStrict;
6412 rcStrict = vmxHCRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
6413 if (rcStrict == VINF_SUCCESS)
6414 {
6415 rcStrict = vmxHCRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
6416 if (rcStrict == VINF_SUCCESS)
6417 rcStrict = vmxHCRealModeGuestStackPush(pVCpu, uGuestIp);
6418 }
6419
6420 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6421 if (rcStrict == VINF_SUCCESS)
6422 {
6423 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6424 pCtx->rip = IdtEntry.offSel;
6425 pCtx->cs.Sel = IdtEntry.uSel;
6426 pCtx->cs.ValidSel = IdtEntry.uSel;
6427 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
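 /* Note: cbIdtEntry is 4, so the shift above is the usual real-mode selector-to-base
    conversion (base = selector << 4). */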
6428 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
6429 && uVector == X86_XCPT_PF)
6430 pCtx->cr2 = GCPtrFault;
6431
6432 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
6433 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
6434 | HM_CHANGED_GUEST_RSP);
6435
6436 /*
6437 * If we delivered a hardware exception (other than an NMI) and if there was
6438 * block-by-STI in effect, we should clear it.
6439 */
6440 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
6441 {
6442 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
6443 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
6444 Log4Func(("Clearing inhibition due to STI\n"));
6445 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
6446 }
6447
6448 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
6449 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
6450
6451 /*
6452 * The event has been truly dispatched to the guest. Mark it as no longer pending so
6453 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
6454 */
6455 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
6456
6457 /*
6458 * If we eventually support nested-guest execution without unrestricted guest execution,
6459 * we should set fInterceptEvents here.
6460 */
6461 Assert(!fIsNestedGuest);
6462
6463 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
6464 if (fStepping)
6465 rcStrict = VINF_EM_DBG_STEPPED;
6466 }
6467 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
6468 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6469 return rcStrict;
6470 }
6471#else
6472 RT_NOREF(pVmcsInfo);
6473#endif
6474 }
6475
6476 /*
6477 * Validate.
6478 */
6479 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6480 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
6481
6482 /*
6483 * Inject the event into the VMCS.
6484 */
6485 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
6486 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
6487 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6488 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6489 AssertRC(rc);
6490
6491 /*
6492 * Update guest CR2 if this is a page-fault.
6493 */
6494 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
6495 pCtx->cr2 = GCPtrFault;
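 /* Note: CR2 is not loaded from the VMCS as part of event injection, so for #PF we must
    update the guest CR2 ourselves before VM-entry. */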
6496
6497 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
6498 return VINF_SUCCESS;
6499}
6500
6501
6502/**
6503 * Evaluates the event to be delivered to the guest and sets it as the pending
6504 * event.
6505 *
6506 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
6507 * exits to ring-3 before executing guest code, see vmxHCExitToRing3(). We must
6508 * NOT restore these force-flags.
6509 *
6510 * @returns Strict VBox status code (i.e. informational status codes too).
6511 * @param pVCpu The cross context virtual CPU structure.
6512 * @param pVmcsInfo The VMCS information structure.
6513 * @param fIsNestedGuest Flag whether the evaluation happens for a nested guest.
6514 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
6515 */
6516static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
6517{
6518 Assert(pfIntrState);
6519 Assert(!TRPMHasTrap(pVCpu));
6520
6521 /*
6522 * Compute/update guest-interruptibility state related FFs.
6523 * The FFs will be used below while evaluating events to be injected.
6524 */
6525 *pfIntrState = vmxHCGetGuestIntrStateAndUpdateFFs(pVCpu);
6526
6527 /*
6528 * Evaluate if a new event needs to be injected.
6529 * An event that's already pending has already performed all necessary checks.
6530 */
6531 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
6532 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6533 {
6534 /** @todo SMI. SMIs take priority over NMIs. */
6535
6536 /*
6537 * NMIs.
6538 * NMIs take priority over external interrupts.
6539 */
6540#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6541 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6542#endif
6543 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
6544 {
6545 /*
6546 * For a guest, the FF always indicates the guest's ability to receive an NMI.
6547 *
6548 * For a nested-guest, the FF always indicates the outer guest's ability to
6549 * receive an NMI while the guest-interruptibility state bit depends on whether
6550 * the nested-hypervisor is using virtual-NMIs.
6551 */
6552 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6553 {
6554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6555 if ( fIsNestedGuest
6556 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
6557 return IEMExecVmxVmexitXcptNmi(pVCpu);
6558#endif
6559 vmxHCSetPendingXcptNmi(pVCpu);
6560 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
6561 Log4Func(("NMI pending injection\n"));
6562
6563 /* We've injected the NMI, bail. */
6564 return VINF_SUCCESS;
6565 }
6566 else if (!fIsNestedGuest)
6567 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
6568 }
6569
6570 /*
6571 * External interrupts (PIC/APIC).
6572 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
6573 * We cannot re-request the interrupt from the controller again.
6574 */
6575 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
6576 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
6577 {
6578 Assert(!DBGFIsStepping(pVCpu));
6579 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
6580 AssertRC(rc);
6581
6582 /*
6583 * We must not check EFLAGS directly when executing a nested-guest, use
6584 * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
6585 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
6586 * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
6587 * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
6588 *
6589 * See Intel spec. 25.4.1 "Event Blocking".
6590 */
6591 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
6592 {
6593#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6594 if ( fIsNestedGuest
6595 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
6596 {
6597 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
6598 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6599 return rcStrict;
6600 }
6601#endif
6602 uint8_t u8Interrupt;
6603 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
6604 if (RT_SUCCESS(rc))
6605 {
6606#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6607 if ( fIsNestedGuest
6608 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
6609 {
6610 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
6611 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
6612 return rcStrict;
6613 }
6614#endif
6615 vmxHCSetPendingExtInt(pVCpu, u8Interrupt);
6616 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
6617 }
6618 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
6619 {
6620 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchTprMaskedIrq);
6621
6622 if ( !fIsNestedGuest
6623 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
6624 vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
6625 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
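 /* The threshold value written above is the masked interrupt's priority class (vector >> 4);
    once the guest lowers its TPR below that class we get a TPR-below-threshold VM-exit and
    can retry delivering the interrupt. */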
6626
6627 /*
6628 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
6629 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
6630 * need to re-set this force-flag here.
6631 */
6632 }
6633 else
6634 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchGuestIrq);
6635
6636 /* We've injected the interrupt or taken necessary action, bail. */
6637 return VINF_SUCCESS;
6638 }
6639 if (!fIsNestedGuest)
6640 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
6641 }
6642 }
6643 else if (!fIsNestedGuest)
6644 {
6645 /*
6646 * An event is being injected or we are in an interrupt shadow. Check if another event is
6647 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
6648 * the pending event.
6649 */
6650 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
6651 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
6652 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
6653 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
6654 vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
6655 }
6656 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
6657
6658 return VINF_SUCCESS;
6659}
6660
6661
6662/**
6663 * Injects any pending events into the guest if the guest is in a state to
6664 * receive them.
6665 *
6666 * @returns Strict VBox status code (i.e. informational status codes too).
6667 * @param pVCpu The cross context virtual CPU structure.
 * @param pVmcsInfo The VMCS info. object.
6668 * @param fIsNestedGuest Flag whether the event injection happens for a nested guest.
6669 * @param fIntrState The VT-x guest-interruptibility state.
6670 * @param fStepping Whether we are single-stepping the guest using the
6671 * hypervisor debugger and should return
6672 * VINF_EM_DBG_STEPPED if the event was dispatched
6673 * directly.
6674 */
6675static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
6676{
6677 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6678#ifdef IN_RING0
6679 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6680#endif
6681
6682#ifdef VBOX_STRICT
6683 /*
6684 * Verify guest-interruptibility state.
6685 *
6686 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
6687 * since injecting an event may modify the interruptibility state and we must thus always
6688 * use fIntrState.
6689 */
6690 {
6691 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
6692 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
6693 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
6694 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6695 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
6696 Assert(!TRPMHasTrap(pVCpu));
6697 NOREF(fBlockMovSS); NOREF(fBlockSti);
6698 }
6699#endif
6700
6701 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
6702 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
6703 {
6704 /*
6705 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
6706 * pending even while injecting an event and in this case, we want a VM-exit as soon as
6707 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
6708 *
6709 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
6710 */
6711 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
6712#ifdef VBOX_STRICT
6713 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
6714 {
6715 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
6716 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
6717 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
6718 }
6719 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
6720 {
6721 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
6722 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
6723 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
6724 }
6725#endif
6726 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
6727 uIntType));
6728
6729 /*
6730 * Inject the event and get any changes to the guest-interruptibility state.
6731 *
6732 * The guest-interruptibility state may need to be updated if we inject the event
6733 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
6734 */
6735 rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
6736 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
6737
6738 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
6739 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterrupt);
6740 else
6741 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectXcpt);
6742 }
6743
6744 /*
6745 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
6746 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
6747 */
6748 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6749 && !fIsNestedGuest)
6750 {
6751 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
6752
6753 if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
6754 {
6755 /*
6756 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
6757 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
6758 */
6759 Assert(!DBGFIsStepping(pVCpu));
6760 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
6761 int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
6762 AssertRC(rc);
6763 }
6764 else
6765 {
6766 /*
6767 * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
6768 * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
6769 * we take care of this case in vmxHCExportSharedDebugState, as well as the case
6770 * where we use MTF, so just make sure it's called before executing guest code.
6771 */
6772 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
6773 }
6774 }
6775 /* else: for nested-guests this is currently handled while merging controls. */
6776
6777 /*
6778 * Finally, update the guest-interruptibility state.
6779 *
6780 * This is required for the real-on-v86 software interrupt injection, for
6781 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
6782 */
6783 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
6784 AssertRC(rc);
6785
6786 /*
6787 * There's no need to clear the VM-entry interruption-information field here if we're not
6788 * injecting anything. VT-x clears the valid bit on every VM-exit.
6789 *
6790 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6791 */
6792
6793 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
6794 return rcStrict;
6795}
6796
6797
6798/**
6799 * Exports the guest state into the VMCS guest-state area.
6800 *
6801 * This will typically be done before VM-entry when the guest-CPU state and the
6802 * VMCS state may potentially be out of sync.
6803 *
6804 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
6805 * VM-entry controls.
6806 * Sets up the appropriate VMX non-root function to execute guest code based on
6807 * the guest CPU mode.
6808 *
6809 * @returns VBox strict status code.
6810 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
6811 * without unrestricted guest execution and the VMMDev is not presently
6812 * mapped (e.g. EFI32).
6813 *
6814 * @param pVCpu The cross context virtual CPU structure.
6815 * @param pVmxTransient The VMX-transient structure.
6816 *
6817 * @remarks No-long-jump zone!!!
6818 */
6819static VBOXSTRICTRC vmxHCExportGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6820{
6821 AssertPtr(pVCpu);
6822 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6823 LogFlowFunc(("pVCpu=%p\n", pVCpu));
6824
6825 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState, x);
6826
6827#ifdef IN_RING0
6828 /*
6829 * Determine real-on-v86 mode.
6830 * Used when the guest is in real-mode and unrestricted guest execution is not used.
6831 */
6832 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
6833 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest
6834 || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
6835 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
6836 else
6837 {
6838 Assert(!pVmxTransient->fIsNestedGuest);
6839 pVmcsInfoShared->RealMode.fRealOnV86Active = true;
6840 }
6841#endif
6842
6843 /*
6844 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6845 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
6846 */
6847 int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
6848 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
6849
6850 rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
6851 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
6852
6853 VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
6854 if (rcStrict == VINF_SUCCESS)
6855 { /* likely */ }
6856 else
6857 {
6858 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
6859 return rcStrict;
6860 }
6861
6862 rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
6863 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
6864
6865 rc = vmxHCExportGuestMsrs(pVCpu, pVmxTransient);
6866 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
6867
6868 vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
6869 vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
6870 vmxHCExportGuestRip(pVCpu);
6871 vmxHCExportGuestRsp(pVCpu);
6872 vmxHCExportGuestRflags(pVCpu, pVmxTransient);
6873
6874 rc = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
6875 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
6876
6877 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
6878 ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
6879 | HM_CHANGED_GUEST_CR2
6880 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
6881 | HM_CHANGED_GUEST_X87
6882 | HM_CHANGED_GUEST_SSE_AVX
6883 | HM_CHANGED_GUEST_OTHER_XSAVE
6884 | HM_CHANGED_GUEST_XCRx
6885 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
6886 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
6887 | HM_CHANGED_GUEST_TSC_AUX
6888 | HM_CHANGED_GUEST_OTHER_MSRS
6889 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
6890
6891 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState, x);
6892 return rc;
6893}
6894
6895
6896/**
6897 * Exports the state shared between the host and guest into the VMCS.
6898 *
6899 * @param pVCpu The cross context virtual CPU structure.
6900 * @param pVmxTransient The VMX-transient structure.
6901 *
6902 * @remarks No-long-jump zone!!!
6903 */
6904static void vmxHCExportSharedState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6905{
6906#ifdef IN_RING0
6907 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6908 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6909#endif
6910
6911 if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
6912 {
6913 int rc = vmxHCExportSharedDebugState(pVCpu, pVmxTransient);
6914 AssertRC(rc);
6915 VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
6916
6917 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
6918 if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
6919 vmxHCExportGuestRflags(pVCpu, pVmxTransient);
6920 }
6921
6922#ifdef IN_RING0
6923 if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
6924 {
6925 vmxHCLazyLoadGuestMsrs(pVCpu);
6926 VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
6927 }
6928#endif
6929
6930 AssertMsg(!(VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
6931 ("fCtxChanged=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).fCtxChanged));
6932}
6933
6934
6935/**
6936 * Worker for loading the guest-state bits in the inner VT-x execution loop.
6937 *
6938 * @returns Strict VBox status code (i.e. informational status codes too).
6939 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
6940 * without unrestricted guest execution and the VMMDev is not presently
6941 * mapped (e.g. EFI32).
6942 *
6943 * @param pVCpu The cross context virtual CPU structure.
6944 * @param pVmxTransient The VMX-transient structure.
6945 *
6946 * @remarks No-long-jump zone!!!
6947 */
6948static VBOXSTRICTRC vmxHCExportGuestStateOptimal(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6949{
6950 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6951#ifdef IN_RING0
6952 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6953#endif
6954
6955#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
6956 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
6957#endif
6958
6959 /*
6960 * For many VM-exits only RIP/RSP/RFLAGS (and HWVIRT state when executing a nested-guest)
6961 * change. First try to export only these without going through all other changed-flag checks.
6962 */
6963 VBOXSTRICTRC rcStrict;
6964 uint64_t const fCtxMask = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
6965 uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
6966 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
6967
6968 /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often).*/
6969 if ( (fCtxChanged & fMinimalMask)
6970 && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
6971 {
6972 vmxHCExportGuestRip(pVCpu);
6973 vmxHCExportGuestRsp(pVCpu);
6974 vmxHCExportGuestRflags(pVCpu, pVmxTransient);
6975 rcStrict = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
6976 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExportMinimal);
6977 }
6978 /* If anything else also changed, go through the full export routine and export as required. */
6979 else if (fCtxChanged & fCtxMask)
6980 {
6981 rcStrict = vmxHCExportGuestState(pVCpu, pVmxTransient);
6982 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
6983 { /* likely */}
6984 else
6985 {
6986 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
6987 VBOXSTRICTRC_VAL(rcStrict)));
6988#ifdef IN_RING0
6989 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6990#endif
6991 return rcStrict;
6992 }
6993 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExportFull);
6994 }
6995 /* Nothing changed, nothing to load here. */
6996 else
6997 rcStrict = VINF_SUCCESS;
6998
6999#ifdef VBOX_STRICT
7000 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
7001 uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
7002 AssertMsg(!(fCtxChangedCur & fCtxMask), ("fCtxChangedCur=%#RX64\n", fCtxChangedCur));
7003#endif
7004 return rcStrict;
7005}
7006
7007
7008/**
7009 * Tries to determine what part of the guest-state VT-x has deemed as invalid
7010 * and update error record fields accordingly.
7011 *
7012 * @returns VMX_IGS_* error codes.
7013 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
7014 * wrong with the guest state.
7015 *
7016 * @param pVCpu The cross context virtual CPU structure.
7017 * @param pVmcsInfo The VMCS info. object.
7018 *
7019 * @remarks This function assumes our cache of the VMCS controls
7020 * is valid, i.e. vmxHCCheckCachedVmcsCtls() succeeded.
7021 */
7022static uint32_t vmxHCCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
7023{
7024#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
7025#define HMVMX_CHECK_BREAK(expr, err) do { \
7026 if (!(expr)) { uError = (err); break; } \
7027 } while (0)
7028
7029 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7030 uint32_t uError = VMX_IGS_ERROR;
7031 uint32_t u32IntrState = 0;
7032#ifdef IN_RING0
7033 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7034 bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
7035#else
7036 bool const fUnrestrictedGuest = true;
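/* Outside ring-0 we presumably only run with unrestricted guest execution, hence the
   hardcoded value above. */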
7037#endif
7038 do
7039 {
7040 int rc;
7041
7042 /*
7043 * Guest-interruptibility state.
7044 *
7045 * Read this first so that any check that fails prior to those that actually
7046 * require the guest-interruptibility state would still reflect the correct
7047 * VMCS value and avoid causing further confusion.
7048 */
7049 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
7050 AssertRC(rc);
7051
7052 uint32_t u32Val;
7053 uint64_t u64Val;
7054
7055 /*
7056 * CR0.
7057 */
7058 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
7059 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
7060 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
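 /* fSetCr0 = bits that must be 1 in guest CR0 (set in both fixed MSRs); fZapCr0 = bits that
    are allowed to be 1. The checks below verify (CR0 & fSetCr0) == fSetCr0 and that no bit
    outside fZapCr0 is set. */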
7061 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
7062 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
7063 if (fUnrestrictedGuest)
7064 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
7065
7066 uint64_t u64GuestCr0;
7067 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64GuestCr0);
7068 AssertRC(rc);
7069 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
7070 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
7071 if ( !fUnrestrictedGuest
7072 && (u64GuestCr0 & X86_CR0_PG)
7073 && !(u64GuestCr0 & X86_CR0_PE))
7074 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
7075
7076 /*
7077 * CR4.
7078 */
7079 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
7080 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
7081 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
7082
7083 uint64_t u64GuestCr4;
7084 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR4, &u64GuestCr4);
7085 AssertRC(rc);
7086 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
7087 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
7088
7089 /*
7090 * IA32_DEBUGCTL MSR.
7091 */
7092 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
7093 AssertRC(rc);
7094 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
7095 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
7096 {
7097 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
7098 }
7099 uint64_t u64DebugCtlMsr = u64Val;
7100
7101#ifdef VBOX_STRICT
7102 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &u32Val);
7103 AssertRC(rc);
7104 Assert(u32Val == pVmcsInfo->u32EntryCtls);
7105#endif
7106 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
7107
7108 /*
7109 * RIP and RFLAGS.
7110 */
7111 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RIP, &u64Val);
7112 AssertRC(rc);
7113 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
7114 if ( !fLongModeGuest
7115 || !pCtx->cs.Attr.n.u1Long)
7116 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
7117 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
7118 * must be identical if the "IA-32e mode guest" VM-entry
7119 * control is 1 and CS.L is 1. No check applies if the
7120 * CPU supports 64 linear-address bits. */
7121
7122 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
7123 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_RFLAGS, &u64Val);
7124 AssertRC(rc);
7125 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
7126 VMX_IGS_RFLAGS_RESERVED);
7127 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
7128 uint32_t const u32Eflags = u64Val;
7129
7130 if ( fLongModeGuest
7131 || ( fUnrestrictedGuest
7132 && !(u64GuestCr0 & X86_CR0_PE)))
7133 {
7134 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
7135 }
7136
7137 uint32_t u32EntryInfo;
7138 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
7139 AssertRC(rc);
7140 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
7141 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
7142
7143 /*
7144 * 64-bit checks.
7145 */
7146 if (fLongModeGuest)
7147 {
7148 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
7149 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
7150 }
7151
7152 if ( !fLongModeGuest
7153 && (u64GuestCr4 & X86_CR4_PCIDE))
7154 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
7155
7156 /** @todo CR3 field must be such that bits 63:52 and bits in the range
7157 * 51:32 beyond the processor's physical-address width are 0. */
7158
7159 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
7160 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
7161 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
7162
7163#ifdef IN_RING0
7164 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
7165 AssertRC(rc);
7166 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
7167
7168 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
7169 AssertRC(rc);
7170 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
7171#endif
7172
7173 /*
7174 * PERF_GLOBAL MSR.
7175 */
7176 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
7177 {
7178 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
7179 AssertRC(rc);
7180 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
7181 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
7182 }
7183
7184 /*
7185 * PAT MSR.
7186 */
7187 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
7188 {
7189 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
7190 AssertRC(rc);
7191 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
7192 for (unsigned i = 0; i < 8; i++)
7193 {
7194 uint8_t u8Val = (u64Val & 0xff);
7195 if ( u8Val != 0 /* UC */
7196 && u8Val != 1 /* WC */
7197 && u8Val != 4 /* WT */
7198 && u8Val != 5 /* WP */
7199 && u8Val != 6 /* WB */
7200 && u8Val != 7 /* UC- */)
7201 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
7202 u64Val >>= 8;
7203 }
7204 }
7205
7206 /*
7207 * EFER MSR.
7208 */
7209 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
7210 {
7211 Assert(g_fHmVmxSupportsVmcsEfer);
7212 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
7213 AssertRC(rc);
7214 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
7215 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
7216 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
7217 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
7218 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
7219 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
7220 * iemVmxVmentryCheckGuestState(). */
7221 HMVMX_CHECK_BREAK( fUnrestrictedGuest
7222 || !(u64GuestCr0 & X86_CR0_PG)
7223 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
7224 VMX_IGS_EFER_LMA_LME_MISMATCH);
7225 }
7226
7227 /*
7228 * Segment registers.
7229 */
7230 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
7231 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
7232 if (!(u32Eflags & X86_EFL_VM))
7233 {
7234 /* CS */
7235 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
7236 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
7237 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
7238 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
7239 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
7240 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
7241 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
7242 /* CS cannot be loaded with NULL in protected mode. */
7243 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
7244 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
7245 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
7246 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
7247 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
7248 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
7249 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
7250 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
7251 else
7252 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
7253
7254 /* SS */
7255 HMVMX_CHECK_BREAK( fUnrestrictedGuest
7256 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
7257 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
7258 if ( !(pCtx->cr0 & X86_CR0_PE)
7259 || pCtx->cs.Attr.n.u4Type == 3)
7260 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
7261
7262 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
7263 {
7264 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
7265 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
7266 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
7267 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
7268 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
7269 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
7270 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
7271 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
7272 }
7273
7274 /* DS, ES, FS, GS - only check for usable selectors, see vmxHCExportGuestSReg(). */
7275 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
7276 {
7277 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
7278 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
7279 HMVMX_CHECK_BREAK( fUnrestrictedGuest
7280 || pCtx->ds.Attr.n.u4Type > 11
7281 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
7282 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
7283 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
7284 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
7285 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
7286 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
7287 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
7288 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7289 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
7290 }
7291 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
7292 {
7293 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
7294 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
7295 HMVMX_CHECK_BREAK( fUnrestrictedGuest
7296 || pCtx->es.Attr.n.u4Type > 11
7297 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
7298 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
7299 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
7300 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
7301 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
7302 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
7303 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
7304 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7305 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
7306 }
7307 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
7308 {
7309 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
7310 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
7311 HMVMX_CHECK_BREAK( fUnrestrictedGuest
7312 || pCtx->fs.Attr.n.u4Type > 11
7313 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
7314 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
7315 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
7316 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
7317 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
7318 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
7319 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
7320 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7321 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
7322 }
7323 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
7324 {
7325 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
7326 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
7327 HMVMX_CHECK_BREAK( fUnrestrictedGuest
7328 || pCtx->gs.Attr.n.u4Type > 11
7329 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
7330 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
7331 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
7332 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
7333 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
7334 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
7335 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
7336 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
7337 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
7338 }
7339 /* 64-bit capable CPUs. */
7340 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
7341 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
7342 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
7343 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
7344 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
7345 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
7346 VMX_IGS_LONGMODE_SS_BASE_INVALID);
7347 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
7348 VMX_IGS_LONGMODE_DS_BASE_INVALID);
7349 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
7350 VMX_IGS_LONGMODE_ES_BASE_INVALID);
7351 }
7352 else
7353 {
7354 /* V86 mode checks. */
7355 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
7356 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
7357 {
7358 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
7359 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
7360 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
7361 }
7362 else
7363 {
7364 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
7365 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
7366 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
7367 }
7368
7369 /* CS */
7370 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
7371 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
7372 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
7373 /* SS */
7374 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
7375 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
7376 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
7377 /* DS */
7378 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
7379 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
7380 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
7381 /* ES */
7382 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
7383 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
7384 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
7385 /* FS */
7386 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
7387 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
7388 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
7389 /* GS */
7390 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
7391 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
7392 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
7393 /* 64-bit capable CPUs. */
7394 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
7395 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
7396 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
7397 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
7398 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
7399 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
7400 VMX_IGS_LONGMODE_SS_BASE_INVALID);
7401 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
7402 VMX_IGS_LONGMODE_DS_BASE_INVALID);
7403 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
7404 VMX_IGS_LONGMODE_ES_BASE_INVALID);
7405 }
7406
7407 /*
7408 * TR.
7409 */
7410 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
7411 /* 64-bit capable CPUs. */
7412 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
7413 if (fLongModeGuest)
7414 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
7415 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
7416 else
7417 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
7418 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
7419 VMX_IGS_TR_ATTR_TYPE_INVALID);
7420 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
7421 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
7422 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
7423 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
7424 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
7425 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
7426 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
7427 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
7428
7429 /*
7430 * GDTR and IDTR (64-bit capable checks).
7431 */
7432 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
7433 AssertRC(rc);
7434 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
7435
7436 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
7437 AssertRC(rc);
7438 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
7439
7440 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
7441 AssertRC(rc);
7442 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
7443
7444 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
7445 AssertRC(rc);
7446 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
7447
7448 /*
7449 * Guest Non-Register State.
7450 */
7451 /* Activity State. */
7452 uint32_t u32ActivityState;
7453 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
7454 AssertRC(rc);
7455 HMVMX_CHECK_BREAK( !u32ActivityState
7456 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
7457 VMX_IGS_ACTIVITY_STATE_INVALID);
7458 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
7459 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
7460
7461 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
7462 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7463 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
7464
7465        /** @todo Activity state and injecting interrupts. Left as a todo since we
7466         *        currently don't use any activity state other than ACTIVE. */
7467
7468 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
7469 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
7470
7471 /* Guest interruptibility-state. */
7472 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
7473 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
7474 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
7475 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
7476 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
7477 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
7478 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
7479 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
7480 {
7481 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7482 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
7483 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
7484 }
7485 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
7486 {
7487 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
7488 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
7489 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
7490 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
7491 }
7492 /** @todo Assumes the processor is not in SMM. */
7493 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
7494 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
7495 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
7496 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
7497 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
7498 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
7499 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
7500 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
7501
7502 /* Pending debug exceptions. */
7503 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
7504 AssertRC(rc);
7505 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
7506 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
7507 u32Val = u64Val; /* For pending debug exceptions checks below. */
7508
7509 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
7510 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
7511 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
7512 {
7513 if ( (u32Eflags & X86_EFL_TF)
7514 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
7515 {
7516 /* Bit 14 is PendingDebug.BS. */
7517 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
7518 }
7519 if ( !(u32Eflags & X86_EFL_TF)
7520 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
7521 {
7522 /* Bit 14 is PendingDebug.BS. */
7523 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
7524 }
7525 }
7526
7527#ifdef IN_RING0
7528 /* VMCS link pointer. */
7529 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
7530 AssertRC(rc);
7531 if (u64Val != UINT64_C(0xffffffffffffffff))
7532 {
7533 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
7534 /** @todo Bits beyond the processor's physical-address width MBZ. */
7535 /** @todo SMM checks. */
7536 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
7537 Assert(pVmcsInfo->pvShadowVmcs);
7538 VMXVMCSREVID VmcsRevId;
7539 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
7540 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
7541 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
7542 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
7543 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
7544 }
7545
7546 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
7547 * not using nested paging? */
7548 if ( VM_IS_VMX_NESTED_PAGING(pVM)
7549 && !fLongModeGuest
7550 && CPUMIsGuestInPAEModeEx(pCtx))
7551 {
7552 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
7553 AssertRC(rc);
7554 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
7555
7556 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
7557 AssertRC(rc);
7558 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
7559
7560 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
7561 AssertRC(rc);
7562 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
7563
7564 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
7565 AssertRC(rc);
7566 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
7567 }
7568#endif
7569
7570 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
7571 if (uError == VMX_IGS_ERROR)
7572 uError = VMX_IGS_REASON_NOT_FOUND;
7573 } while (0);
7574
7575 VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
7576 VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
7577 return uError;
7578
7579#undef HMVMX_ERROR_BREAK
7580#undef HMVMX_CHECK_BREAK
7581}
7582
7583
7584#ifdef IN_RING0
7585/**
7586 * Maps the APIC-access page for virtualizing APIC accesses.
7587 *
7588 * This can cause longjumps to ring-3 due to the acquisition of the PGM lock. Hence,
7589 * this is not done as part of exporting guest state; see @bugref{8721}.
7590 *
7591 * @returns VBox status code.
7592 * @param pVCpu The cross context virtual CPU structure.
7593 */
7594static int vmxHCMapHCApicAccessPage(PVMCPUCC pVCpu)
7595{
7596 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7597 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
7598
7599 Assert(PDMHasApic(pVM));
7600 Assert(u64MsrApicBase);
7601
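    /* Mask off the low bits of the APIC base MSR to get the page-aligned guest-physical address of the APIC MMIO page. */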
7602 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
7603    Log4Func(("Mapping HC APIC-access page at %#RGp\n", GCPhysApicBase));
7604
7605 /* Unalias the existing mapping. */
7606 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
7607 AssertRCReturn(rc, rc);
7608
7609 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
7610 Assert(pVM->hmr0.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
7611 rc = IOMR0MmioMapMmioHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hmr0.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
7612 AssertRCReturn(rc, rc);
7613
7614 /* Update the per-VCPU cache of the APIC base MSR. */
7615 VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase = u64MsrApicBase;
7616 return VINF_SUCCESS;
7617}
7618
7619
7620/**
7621 * Worker function passed to RTMpOnSpecific() that is to be called on the target
7622 * CPU.
7623 *
7624 * @param idCpu The ID for the CPU the function is called on.
7625 * @param pvUser1 Null, not used.
7626 * @param pvUser2 Null, not used.
7627 */
7628static DECLCALLBACK(void) hmR0DispatchHostNmi(RTCPUID idCpu, void *pvUser1, void *pvUser2)
7629{
7630 RT_NOREF3(idCpu, pvUser1, pvUser2);
7631 VMXDispatchHostNmi();
7632}
7633
7634
7635/**
7636 * Dispatches an NMI on the host CPU that received it.
7637 *
7638 * @returns VBox status code.
7639 * @param pVCpu The cross context virtual CPU structure.
7640 * @param pVmcsInfo The VMCS info. object corresponding to the VMCS that was
7641 * executing when receiving the host NMI in VMX non-root
7642 * operation.
7643 */
7644static int vmxHCExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
7645{
7646 RTCPUID const idCpu = pVmcsInfo->idHostCpuExec;
7647 Assert(idCpu != NIL_RTCPUID);
7648
7649 /*
7650 * We don't want to delay dispatching the NMI any more than we have to. However,
7651 * we have already chosen -not- to dispatch NMIs when interrupts were still disabled
7652 * after executing guest or nested-guest code for the following reasons:
7653 *
7654     *   - We would need to perform VMREADs with interrupts disabled, which is orders of
7655 * magnitude worse when we run as a nested hypervisor without VMCS shadowing
7656 * supported by the host hypervisor.
7657 *
7658     *   - It would affect the common VM-exit scenario and keep interrupts disabled for a
7659     *     longer period of time just to handle an edge case like host NMIs, which do
7660     *     not occur nearly as frequently as other VM-exits.
7661 *
7662 * Let's cover the most likely scenario first. Check if we are on the target CPU
7663 * and dispatch the NMI right away. This should be much faster than calling into
7664 * RTMpOnSpecific() machinery.
7665 */
7666 bool fDispatched = false;
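    /* Disable interrupts so we cannot be rescheduled to another CPU between checking the current CPU ID and dispatching the NMI. */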
7667 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
7668 if (idCpu == RTMpCpuId())
7669 {
7670 VMXDispatchHostNmi();
7671 fDispatched = true;
7672 }
7673 ASMSetFlags(fEFlags);
7674 if (fDispatched)
7675 {
7676 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitHostNmiInGC);
7677 return VINF_SUCCESS;
7678 }
7679
7680 /*
7681 * RTMpOnSpecific() waits until the worker function has run on the target CPU. So
7682 * there should be no race or recursion even if we are unlucky enough to be preempted
7683 * (to the target CPU) without dispatching the host NMI above.
7684 */
7685 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitHostNmiInGCIpi);
7686 return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */, NULL /* pvUser2 */);
7687}
7688
7689
7690#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7691/**
7692 * Merges the guest MSR bitmap with the nested-guest MSR bitmap in preparation for
7693 * executing the nested-guest using hardware-assisted VMX.
7694 *
7695 * @param pVCpu The cross context virtual CPU structure.
7696 * @param pVmcsInfoNstGst The nested-guest VMCS info. object.
7697 * @param pVmcsInfoGst The guest VMCS info. object.
7698 */
7699static void vmxHCMergeMsrBitmapNested(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
7700{
7701 uint32_t const cbMsrBitmap = X86_PAGE_4K_SIZE;
7702 uint64_t *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
7703 Assert(pu64MsrBitmap);
7704
7705 /*
7706 * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
7707 * MSR that is intercepted by the guest is also intercepted while executing the
7708 * nested-guest using hardware-assisted VMX.
7709 *
7710 * Note! If the nested-guest is not using an MSR bitmap, every MSR must cause a
7711 * nested-guest VM-exit even if the outer guest is not intercepting some
7712 * MSRs. We cannot assume the caller has initialized the nested-guest
7713 * MSR bitmap in this case.
7714 *
7715 * The nested hypervisor may also switch whether it uses MSR bitmaps for
7716     *       each of its VM-entries; hence initializing it once per-VM while setting
7717 * up the nested-guest VMCS is not sufficient.
7718 */
7719 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7720 if (pVmcsNstGst->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7721 {
7722 uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap[0];
7723 uint64_t const *pu64MsrBitmapGst = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
7724 Assert(pu64MsrBitmapNstGst);
7725 Assert(pu64MsrBitmapGst);
7726
7727 /** @todo Detect and use EVEX.POR? */
7728 uint32_t const cFrags = cbMsrBitmap / sizeof(uint64_t);
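        /* OR the two bitmaps together 64 bits at a time: a bit set in either bitmap keeps the corresponding MSR access intercepted. */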
7729 for (uint32_t i = 0; i < cFrags; i++)
7730 pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
7731 }
7732 else
7733 ASMMemFill32(pu64MsrBitmap, cbMsrBitmap, UINT32_C(0xffffffff));
7734}
7735
7736
7737/**
7738 * Merges the guest VMCS into the nested-guest VMCS controls in preparation for
7739 * hardware-assisted VMX execution of the nested-guest.
7740 *
7741 * For a guest, we don't modify these controls once we set up the VMCS and hence
7742 * this function is never called.
7743 *
7744 * For nested-guests, since the nested hypervisor provides these controls on every
7745 * nested-guest VM-entry and could potentially change them every time, we need to
7746 * merge them before every nested-guest VM-entry.
7747 *
7748 * @returns VBox status code.
7749 * @param pVCpu The cross context virtual CPU structure.
7750 */
7751static int vmxHCMergeVmcsNested(PVMCPUCC pVCpu)
7752{
7753 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7754 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
7755 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7756
7757 /*
7758 * Merge the controls with the requirements of the guest VMCS.
7759 *
7760 * We do not need to validate the nested-guest VMX features specified in the nested-guest
7761 * VMCS with the features supported by the physical CPU as it's already done by the
7762 * VMLAUNCH/VMRESUME instruction emulation.
7763 *
7764 * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the guest are
7765 * derived from the VMX features supported by the physical CPU.
7766 */
7767
7768 /* Pin-based VM-execution controls. */
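    /* A plain OR suffices here: any pin-based control required by either the guest VMCS or the nested hypervisor must remain set. */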
7769 uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
7770
7771 /* Processor-based VM-execution controls. */
7772 uint32_t u32ProcCtls = (pVmcsNstGst->u32ProcCtls & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
7773 | (pVmcsInfoGst->u32ProcCtls & ~( VMX_PROC_CTLS_INT_WINDOW_EXIT
7774 | VMX_PROC_CTLS_NMI_WINDOW_EXIT
7775 | VMX_PROC_CTLS_MOV_DR_EXIT
7776 | VMX_PROC_CTLS_USE_TPR_SHADOW
7777 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
7778
7779 /* Secondary processor-based VM-execution controls. */
7780 uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2 & ~VMX_PROC_CTLS2_VPID)
7781 | (pVmcsInfoGst->u32ProcCtls2 & ~( VMX_PROC_CTLS2_VIRT_APIC_ACCESS
7782 | VMX_PROC_CTLS2_INVPCID
7783 | VMX_PROC_CTLS2_VMCS_SHADOWING
7784 | VMX_PROC_CTLS2_RDTSCP
7785 | VMX_PROC_CTLS2_XSAVES_XRSTORS
7786 | VMX_PROC_CTLS2_APIC_REG_VIRT
7787 | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
7788 | VMX_PROC_CTLS2_VMFUNC));
7789
7790 /*
7791 * VM-entry controls:
7792     *    These controls contain state that depends on the nested-guest state (primarily
7793 * EFER MSR) and is thus not constant between VMLAUNCH/VMRESUME and the nested-guest
7794 * VM-exit. Although the nested hypervisor cannot change it, we need to in order to
7795 * properly continue executing the nested-guest if the EFER MSR changes but does not
7796     *    cause a nested-guest VM-exit.
7797 *
7798 * VM-exit controls:
7799 * These controls specify the host state on return. We cannot use the controls from
7800 * the nested hypervisor state as is as it would contain the guest state rather than
7801 * the host state. Since the host state is subject to change (e.g. preemption, trips
7802 * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant
7803 * through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
7804 *
7805 * VM-entry MSR-load:
7806 * The guest MSRs from the VM-entry MSR-load area are already loaded into the guest-CPU
7807 * context by the VMLAUNCH/VMRESUME instruction emulation.
7808 *
7809 * VM-exit MSR-store:
7810 * The VM-exit emulation will take care of populating the MSRs from the guest-CPU context
7811 * back into the VM-exit MSR-store area.
7812 *
7813 * VM-exit MSR-load areas:
7814 * This must contain the real host MSRs with hardware-assisted VMX execution. Hence, we
7815 * can entirely ignore what the nested hypervisor wants to load here.
7816 */
7817
7818 /*
7819 * Exception bitmap.
7820 *
7821 * We could remove #UD from the guest bitmap and merge it with the nested-guest bitmap
7822 * here (and avoid doing anything while exporting nested-guest state), but to keep the
7823 * code more flexible if intercepting exceptions become more dynamic in the future we do
7824 * it as part of exporting the nested-guest state.
7825 */
7826 uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
7827
7828 /*
7829 * CR0/CR4 guest/host mask.
7830 *
7831 * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest must
7832 * cause VM-exits, so we need to merge them here.
7833 */
7834 uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
7835 uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
7836
7837 /*
7838 * Page-fault error-code mask and match.
7839 *
7840 * Although we require unrestricted guest execution (and thereby nested-paging) for
7841 * hardware-assisted VMX execution of nested-guests and thus the outer guest doesn't
7842 * normally intercept #PFs, it might intercept them for debugging purposes.
7843 *
7844 * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF filters.
7845 * If the outer guest is intercepting #PFs, we must intercept all #PFs.
7846 */
7847 uint32_t u32XcptPFMask;
7848 uint32_t u32XcptPFMatch;
7849 if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
7850 {
7851 u32XcptPFMask = pVmcsNstGst->u32XcptPFMask;
7852 u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
7853 }
7854 else
7855 {
7856 u32XcptPFMask = 0;
7857 u32XcptPFMatch = 0;
7858 }
7859
7860 /*
7861 * Pause-Loop exiting.
7862 */
7863 /** @todo r=bird: given that both pVM->hm.s.vmx.cPleGapTicks and
7864 * pVM->hm.s.vmx.cPleWindowTicks defaults to zero, I cannot see how
7865 * this will work... */
7866 uint32_t const cPleGapTicks = RT_MIN(pVM->hm.s.vmx.cPleGapTicks, pVmcsNstGst->u32PleGap);
7867 uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
7868
7869 /*
7870 * Pending debug exceptions.
7871 * Currently just copy whatever the nested-guest provides us.
7872 */
7873 uint64_t const uPendingDbgXcpts = pVmcsNstGst->u64GuestPendingDbgXcpts.u;
7874
7875 /*
7876 * I/O Bitmap.
7877 *
7878 * We do not use the I/O bitmap that may be provided by the nested hypervisor as we always
7879 * intercept all I/O port accesses.
7880 */
7881 Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
7882 Assert(!(u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS));
7883
7884 /*
7885 * VMCS shadowing.
7886 *
7887 * We do not yet expose VMCS shadowing to the guest and thus VMCS shadowing should not be
7888 * enabled while executing the nested-guest.
7889 */
7890 Assert(!(u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING));
7891
7892 /*
7893 * APIC-access page.
7894 */
7895 RTHCPHYS HCPhysApicAccess;
7896 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
7897 {
7898 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
7899 RTGCPHYS const GCPhysApicAccess = pVmcsNstGst->u64AddrApicAccess.u;
7900
7901 /** @todo NSTVMX: This is not really correct but currently is required to make
7902 * things work. We need to re-enable the page handler when we fallback to
7903 * IEM execution of the nested-guest! */
7904 PGMHandlerPhysicalPageTempOff(pVM, GCPhysApicAccess, GCPhysApicAccess);
7905
7906 void *pvPage;
7907 PGMPAGEMAPLOCK PgLockApicAccess;
7908 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysApicAccess, &pvPage, &PgLockApicAccess);
7909 if (RT_SUCCESS(rc))
7910 {
7911 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysApicAccess, &HCPhysApicAccess);
7912 AssertMsgRCReturn(rc, ("Failed to get host-physical address for APIC-access page at %#RGp\n", GCPhysApicAccess), rc);
7913
7914 /** @todo Handle proper releasing of page-mapping lock later. */
7915 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockApicAccess);
7916 }
7917 else
7918 return rc;
7919 }
7920 else
7921 HCPhysApicAccess = 0;
7922
7923 /*
7924 * Virtual-APIC page and TPR threshold.
7925 */
7926 RTHCPHYS HCPhysVirtApic;
7927 uint32_t u32TprThreshold;
7928 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
7929 {
7930 Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW);
7931 RTGCPHYS const GCPhysVirtApic = pVmcsNstGst->u64AddrVirtApic.u;
7932
7933 void *pvPage;
7934 PGMPAGEMAPLOCK PgLockVirtApic;
7935 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysVirtApic, &pvPage, &PgLockVirtApic);
7936 if (RT_SUCCESS(rc))
7937 {
7938 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysVirtApic, &HCPhysVirtApic);
7939 AssertMsgRCReturn(rc, ("Failed to get host-physical address for virtual-APIC page at %#RGp\n", GCPhysVirtApic), rc);
7940
7941 /** @todo Handle proper releasing of page-mapping lock later. */
7942 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockVirtApic);
7943 }
7944 else
7945 return rc;
7946
7947 u32TprThreshold = pVmcsNstGst->u32TprThreshold;
7948 }
7949 else
7950 {
7951 HCPhysVirtApic = 0;
7952 u32TprThreshold = 0;
7953
7954 /*
7955         * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
7956 * used by the nested hypervisor. Preventing MMIO accesses to the physical APIC will
7957 * be taken care of by EPT/shadow paging.
7958 */
7959 if (pVM->hmr0.s.fAllow64BitGuests)
7960 u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
7961 | VMX_PROC_CTLS_CR8_LOAD_EXIT;
7962 }
7963
7964 /*
7965 * Validate basic assumptions.
7966 */
7967 PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
7968 Assert(VM_IS_VMX_UNRESTRICTED_GUEST(pVM));
7969 Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
7970 Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
7971
7972 /*
7973 * Commit it to the nested-guest VMCS.
7974 */
7975 int rc = VINF_SUCCESS;
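    /* Only fields that differ from the cached values are written to keep the number of VMWRITEs down; failures accumulate in rc and are caught by the AssertRC below. */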
7976 if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
7977 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
7978 if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
7979 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
7980 if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
7981 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
7982 if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
7983 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
7984 if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
7985 rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
7986 if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
7987 rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
7988 if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
7989 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
7990 if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
7991 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
7992 if ( !(u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
7993 && (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
7994 {
7995 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
7996 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks);
7997 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
7998 }
7999 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
8000 {
8001 rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
8002 rc |= VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
8003 }
8004 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
8005 rc |= VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
8006 rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, uPendingDbgXcpts);
8007 AssertRC(rc);
8008
8009 /*
8010 * Update the nested-guest VMCS cache.
8011 */
8012 pVmcsInfoNstGst->u32PinCtls = u32PinCtls;
8013 pVmcsInfoNstGst->u32ProcCtls = u32ProcCtls;
8014 pVmcsInfoNstGst->u32ProcCtls2 = u32ProcCtls2;
8015 pVmcsInfoNstGst->u32XcptBitmap = u32XcptBitmap;
8016 pVmcsInfoNstGst->u64Cr0Mask = u64Cr0Mask;
8017 pVmcsInfoNstGst->u64Cr4Mask = u64Cr4Mask;
8018 pVmcsInfoNstGst->u32XcptPFMask = u32XcptPFMask;
8019 pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
8020 pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
8021
8022 /*
8023 * We need to flush the TLB if we are switching the APIC-access page address.
8024 * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction".
8025 */
8026 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
8027 VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedNstGstFlushTlb = true;
8028
8029 /*
8030 * MSR bitmap.
8031 *
8032 * The MSR bitmap address has already been initialized while setting up the nested-guest
8033 * VMCS, here we need to merge the MSR bitmaps.
8034 */
8035 if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
8036 vmxHCMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
8037
8038 return VINF_SUCCESS;
8039}
8040#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8041
8042
8043/**
8044 * Runs the guest code using hardware-assisted VMX the normal way.
8045 *
8046 * @returns VBox status code.
8047 * @param pVCpu The cross context virtual CPU structure.
8048 * @param pcLoops Pointer to the number of executed loops.
8049 */
8050static VBOXSTRICTRC vmxHCRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
8051{
8052 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
8053 Assert(pcLoops);
8054 Assert(*pcLoops <= cMaxResumeLoops);
8055 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
8056
8057#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8058 /*
8059 * Switch to the guest VMCS as we may have transitioned from executing the nested-guest
8060 * without leaving ring-0. Otherwise, if we came from ring-3 we would have loaded the
8061 * guest VMCS while entering the VMX ring-0 session.
8062 */
8063 if (pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
8064 {
8065 int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
8066 if (RT_SUCCESS(rc))
8067 { /* likely */ }
8068 else
8069 {
8070 LogRelFunc(("Failed to switch to the guest VMCS. rc=%Rrc\n", rc));
8071 return rc;
8072 }
8073 }
8074#endif
8075
8076 VMXTRANSIENT VmxTransient;
8077 RT_ZERO(VmxTransient);
8078 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8079
8080 /* Paranoia. */
8081 Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfo);
8082
8083 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8084 for (;;)
8085 {
8086 Assert(!HMR0SuspendPending());
8087 HMVMX_ASSERT_CPU_SAFE(pVCpu);
8088 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
8089
8090 /*
8091         * Preparatory work for running guest code; this may force us to
8092 * return to ring-3.
8093 *
8094 * Warning! This bugger disables interrupts on VINF_SUCCESS!
8095 */
8096 rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
8097 if (rcStrict != VINF_SUCCESS)
8098 break;
8099
8100 /* Interrupts are disabled at this point! */
8101 vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
8102 int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
8103 vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
8104 /* Interrupts are re-enabled at this point! */
8105
8106 /*
8107 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
8108 */
8109 if (RT_SUCCESS(rcRun))
8110 { /* very likely */ }
8111 else
8112 {
8113 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
8114 vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
8115 return rcRun;
8116 }
8117
8118 /*
8119 * Profile the VM-exit.
8120 */
8121 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8122 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
8123 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8124 STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
8125 HMVMX_START_EXIT_DISPATCH_PROF();
8126
8127 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
8128
8129 /*
8130 * Handle the VM-exit.
8131 */
8132#ifdef HMVMX_USE_FUNCTION_TABLE
8133 rcStrict = g_aVMExitHandlers[VmxTransient.uExitReason].pfn(pVCpu, &VmxTransient);
8134#else
8135 rcStrict = vmxHCHandleExit(pVCpu, &VmxTransient);
8136#endif
8137 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
8138 if (rcStrict == VINF_SUCCESS)
8139 {
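            /* Stay in the inner loop until the resume-loop limit is reached; then break out with VINF_EM_RAW_INTERRUPT to force a return to the outer loop. */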
8140 if (++(*pcLoops) <= cMaxResumeLoops)
8141 continue;
8142 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
8143 rcStrict = VINF_EM_RAW_INTERRUPT;
8144 }
8145 break;
8146 }
8147
8148 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
8149 return rcStrict;
8150}
8151
8152
8153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8154/**
8155 * Runs the nested-guest code using hardware-assisted VMX.
8156 *
8157 * @returns VBox status code.
8158 * @param pVCpu The cross context virtual CPU structure.
8159 * @param pcLoops Pointer to the number of executed loops.
8160 *
8161 * @sa vmxHCRunGuestCodeNormal.
8162 */
8163static VBOXSTRICTRC vmxHCRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
8164{
8165 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
8166 Assert(pcLoops);
8167 Assert(*pcLoops <= cMaxResumeLoops);
8168 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
8169
8170 /*
8171 * Switch to the nested-guest VMCS as we may have transitioned from executing the
8172 * guest without leaving ring-0. Otherwise, if we came from ring-3 we would have
8173 * loaded the nested-guest VMCS while entering the VMX ring-0 session.
8174 */
8175 if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
8176 {
8177 int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
8178 if (RT_SUCCESS(rc))
8179 { /* likely */ }
8180 else
8181 {
8182 LogRelFunc(("Failed to switch to the nested-guest VMCS. rc=%Rrc\n", rc));
8183 return rc;
8184 }
8185 }
8186
8187 VMXTRANSIENT VmxTransient;
8188 RT_ZERO(VmxTransient);
8189 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8190 VmxTransient.fIsNestedGuest = true;
8191
8192 /* Paranoia. */
8193 Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfoNstGst);
8194
8195 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8196 for (;;)
8197 {
8198 Assert(!HMR0SuspendPending());
8199 HMVMX_ASSERT_CPU_SAFE(pVCpu);
8200 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
8201
8202 /*
8203         * Preparatory work for running nested-guest code; this may force us to
8204 * return to ring-3.
8205 *
8206 * Warning! This bugger disables interrupts on VINF_SUCCESS!
8207 */
8208 rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
8209 if (rcStrict != VINF_SUCCESS)
8210 break;
8211
8212 /* Interrupts are disabled at this point! */
8213 vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
8214 int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
8215 vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
8216 /* Interrupts are re-enabled at this point! */
8217
8218 /*
8219 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
8220 */
8221 if (RT_SUCCESS(rcRun))
8222 { /* very likely */ }
8223 else
8224 {
8225 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
8226 vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
8227 return rcRun;
8228 }
8229
8230 /*
8231 * Profile the VM-exit.
8232 */
8233 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8234 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
8235 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatNestedExitAll);
8236 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8237 STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
8238 HMVMX_START_EXIT_DISPATCH_PROF();
8239
8240 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
8241
8242 /*
8243 * Handle the VM-exit.
8244 */
8245 rcStrict = vmxHCHandleExitNested(pVCpu, &VmxTransient);
8246 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
8247 if (rcStrict == VINF_SUCCESS)
8248 {
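            /* If the exit handler emulated a nested-guest VM-exit, the guest is no longer in VMX non-root mode; report this to the caller as VINF_VMX_VMEXIT. */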
8249 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8250 {
8251 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchNstGstVmexit);
8252 rcStrict = VINF_VMX_VMEXIT;
8253 }
8254 else
8255 {
8256 if (++(*pcLoops) <= cMaxResumeLoops)
8257 continue;
8258 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
8259 rcStrict = VINF_EM_RAW_INTERRUPT;
8260 }
8261 }
8262 else
8263 Assert(rcStrict != VINF_VMX_VMEXIT);
8264 break;
8265 }
8266
8267 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
8268 return rcStrict;
8269}
8270#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8271
8272
8273/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
8274 * probes.
8275 *
8276 * The following few functions and associated structure contain the bloat
8277 * necessary for providing detailed debug events and dtrace probes as well as
8278 * reliable host-side single stepping.
8279 * "subclassing" the normal execution loop and workers. We replace the loop
8280 * method completely and override selected helpers to add necessary adjustments
8281 * to their core operation.
8282 *
8283 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
8284 * any performance for debug and analysis features.
8285 *
8286 * @{
8287 */
8288
8289/**
8290 * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
8291 * the debug run loop.
8292 */
8293typedef struct VMXRUNDBGSTATE
8294{
8295 /** The RIP we started executing at. This is for detecting that we stepped. */
8296 uint64_t uRipStart;
8297 /** The CS we started executing with. */
8298 uint16_t uCsStart;
8299
8300 /** Whether we've actually modified the 1st execution control field. */
8301 bool fModifiedProcCtls : 1;
8302 /** Whether we've actually modified the 2nd execution control field. */
8303 bool fModifiedProcCtls2 : 1;
8304 /** Whether we've actually modified the exception bitmap. */
8305 bool fModifiedXcptBitmap : 1;
8306
8307    /** We desire the CR0 mask to be cleared. */
8308    bool fClearCr0Mask : 1;
8309    /** We desire the CR4 mask to be cleared. */
8310 bool fClearCr4Mask : 1;
8311 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
8312 uint32_t fCpe1Extra;
8313 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
8314 uint32_t fCpe1Unwanted;
8315 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
8316 uint32_t fCpe2Extra;
8317 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
8318 uint32_t bmXcptExtra;
8319 /** The sequence number of the Dtrace provider settings the state was
8320 * configured against. */
8321 uint32_t uDtraceSettingsSeqNo;
8322 /** VM-exits to check (one bit per VM-exit). */
8323 uint32_t bmExitsToCheck[3];
8324
8325 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
8326 uint32_t fProcCtlsInitial;
8327 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
8328 uint32_t fProcCtls2Initial;
8329 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
8330 uint32_t bmXcptInitial;
8331} VMXRUNDBGSTATE;
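/* Compile-time check that bmExitsToCheck provides one bit for every possible VM-exit reason (rounded up to whole 32-bit words). */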
8332AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
8333typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
8334
8335
8336/**
8337 * Initializes the VMXRUNDBGSTATE structure.
8338 *
8339 * @param pVCpu The cross context virtual CPU structure of the
8340 * calling EMT.
8341 * @param pVmxTransient The VMX-transient structure.
8342 * @param pDbgState The debug state to initialize.
8343 */
8344static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
8345{
8346 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
8347 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
8348
8349 pDbgState->fModifiedProcCtls = false;
8350 pDbgState->fModifiedProcCtls2 = false;
8351 pDbgState->fModifiedXcptBitmap = false;
8352 pDbgState->fClearCr0Mask = false;
8353 pDbgState->fClearCr4Mask = false;
8354 pDbgState->fCpe1Extra = 0;
8355 pDbgState->fCpe1Unwanted = 0;
8356 pDbgState->fCpe2Extra = 0;
8357 pDbgState->bmXcptExtra = 0;
8358 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
8359 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
8360 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
8361}
8362
8363
8364/**
8365 * Updates the VMCS fields with changes requested by @a pDbgState.
8366 *
8367 * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
8368 * immediately before executing guest code, i.e. when interrupts are disabled.
8369 * We don't check status codes here as we cannot easily assert or return in the
8370 * latter case.
8371 *
8372 * @param pVCpu The cross context virtual CPU structure.
8373 * @param pVmxTransient The VMX-transient structure.
8374 * @param pDbgState The debug state.
8375 */
8376static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
8377{
8378 /*
8379 * Ensure desired flags in VMCS control fields are set.
8380 * (Ignoring write failure here, as we're committed and it's just debug extras.)
8381 *
8382 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
8383 * there should be no stale data in pCtx at this point.
8384 */
8385 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8386 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
8387 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
8388 {
8389 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
8390 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
8391 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8392 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
8393 pDbgState->fModifiedProcCtls = true;
8394 }
8395
8396 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
8397 {
8398 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
8399 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
8400 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
8401 pDbgState->fModifiedProcCtls2 = true;
8402 }
8403
8404 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
8405 {
8406 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
8407 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
8408 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
8409 pDbgState->fModifiedXcptBitmap = true;
8410 }
8411
8412 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
8413 {
8414 pVmcsInfo->u64Cr0Mask = 0;
8415 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
8416 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
8417 }
8418
8419 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
8420 {
8421 pVmcsInfo->u64Cr4Mask = 0;
8422 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
8423 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
8424 }
8425
8426 NOREF(pVCpu);
8427}
8428
8429
8430/**
8431 * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
8432 * re-entry next time around.
8433 *
8434 * @returns Strict VBox status code (i.e. informational status codes too).
8435 * @param pVCpu The cross context virtual CPU structure.
8436 * @param pVmxTransient The VMX-transient structure.
8437 * @param pDbgState The debug state.
8438 * @param rcStrict The return code from executing the guest using single
8439 * stepping.
8440 */
8441static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
8442 VBOXSTRICTRC rcStrict)
8443{
8444 /*
8445 * Restore VM-exit control settings as we may not reenter this function the
8446 * next time around.
8447 */
8448 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8449
8450    /* We reload the initial value and trigger whatever recalculations we can the
8451       next time around. From the looks of things, that's all that's required atm. */
8452 if (pDbgState->fModifiedProcCtls)
8453 {
8454 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
8455 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in vmxHCLeave */
8456 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
8457 AssertRC(rc2);
8458 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
8459 }
8460
8461 /* We're currently the only ones messing with this one, so just restore the
8462 cached value and reload the field. */
8463 if ( pDbgState->fModifiedProcCtls2
8464 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
8465 {
8466 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
8467 AssertRC(rc2);
8468 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
8469 }
8470
8471 /* If we've modified the exception bitmap, we restore it and trigger
8472 reloading and partial recalculation the next time around. */
8473 if (pDbgState->fModifiedXcptBitmap)
8474 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
8475
8476 return rcStrict;
8477}
8478
8479
8480/**
8481 * Configures VM-exit controls for current DBGF and DTrace settings.
8482 *
8483 * This updates @a pDbgState and the VMCS execution control fields to reflect
8484 * the necessary VM-exits demanded by DBGF and DTrace.
8485 *
8486 * @param pVCpu The cross context virtual CPU structure.
8487 * @param pVmxTransient The VMX-transient structure. May update
8488 * fUpdatedTscOffsettingAndPreemptTimer.
8489 * @param pDbgState The debug state.
8490 */
8491static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
8492{
8493 /*
8494 * Take down the dtrace serial number so we can spot changes.
8495 */
8496 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
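    /* The compiler barrier ensures the sequence number above is read before the individual settings are sampled below. */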
8497 ASMCompilerBarrier();
8498
8499 /*
8500 * We'll rebuild most of the middle block of data members (holding the
8501 * current settings) as we go along here, so start by clearing it all.
8502 */
8503 pDbgState->bmXcptExtra = 0;
8504 pDbgState->fCpe1Extra = 0;
8505 pDbgState->fCpe1Unwanted = 0;
8506 pDbgState->fCpe2Extra = 0;
8507 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
8508 pDbgState->bmExitsToCheck[i] = 0;
8509
8510 /*
8511 * Software interrupts (INT XXh) - no idea how to trigger these...
8512 */
8513 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8514 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
8515 || VBOXVMM_INT_SOFTWARE_ENABLED())
8516 {
8517 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
8518 }
8519
8520 /*
8521 * INT3 breakpoints - triggered by #BP exceptions.
8522 */
8523 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
8524 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
8525
8526 /*
8527 * Exception bitmap and XCPT events+probes.
8528 */
8529 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
8530 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
8531 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
8532
8533 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
8534 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
8535 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
8536 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
8537 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
8538 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
8539 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
8540 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
8541 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
8542 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
8543 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
8544 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
8545 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
8546 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
8547 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
8548 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
8549 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
8550 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
8551
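    /* Exceptions are delivered through the exception/NMI VM-exit, so any extra exception intercepts also require checking that exit. */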
8552 if (pDbgState->bmXcptExtra)
8553 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
8554
8555 /*
8556 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
8557 *
8558 * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
8559 * So, when adding/changing/removing please don't forget to update it.
8560 *
8561 * Some of the macros are picking up local variables to save horizontal space,
8562 * (being able to see it in a table is the lesser evil here).
8563 */
8564#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
8565 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
8566 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
8567#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
8568 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
8569 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
8570 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
8571 } else do { } while (0)
8572#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
8573 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
8574 { \
8575 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
8576 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
8577 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
8578 } else do { } while (0)
8579#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
8580 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
8581 { \
8582 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
8583 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
8584 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
8585 } else do { } while (0)
8586#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
8587 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
8588 { \
8589 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
8590 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
8591 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
8592 } else do { } while (0)
8593
8594 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
8595 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
8596 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
8597 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
8598 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
8599
8600 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
8601 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
8602 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
8603 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
8604 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
8605 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
8606 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
8607 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
8608 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
8609 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
8610 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
8611 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
8612 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
8613 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
8614 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
8615 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
8616 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
8617 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
8618 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
8619 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
8620 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
8621 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
8622 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
8623 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
8624 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
8625 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
8626 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
8627 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
8628 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
8629 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
8630 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
8631 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
8632 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
8633 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
8634 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
8635 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
8636
8637 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
8638 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
8639 {
8640 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
8641 | CPUMCTX_EXTRN_APIC_TPR);
8642 AssertRC(rc);
8643
8644#if 0 /** @todo fix me */
8645 pDbgState->fClearCr0Mask = true;
8646 pDbgState->fClearCr4Mask = true;
8647#endif
8648 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
8649 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
8650 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
8651 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
8652 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
8653 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
8654 require clearing here and in the loop if we start using it. */
8655 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
8656 }
8657 else
8658 {
8659 if (pDbgState->fClearCr0Mask)
8660 {
8661 pDbgState->fClearCr0Mask = false;
8662 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
8663 }
8664 if (pDbgState->fClearCr4Mask)
8665 {
8666 pDbgState->fClearCr4Mask = false;
8667 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
8668 }
8669 }
8670 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
8671 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
8672
8673 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
8674 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
8675 {
8676 /** @todo later, need to fix handler as it assumes this won't usually happen. */
8677 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
8678 }
8679 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
8680 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
8681
8682 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
8683 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
8684 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
8685 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
8686 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
8687 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
8688 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
8689 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
8690#if 0 /** @todo too slow, fix handler. */
8691 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
8692#endif
8693 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
8694
8695 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
8696 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
8697 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
8698 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
8699 {
8700 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
8701 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
8702 }
8703 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
8704 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
8705 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
8706 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
8707
8708 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
8709 || IS_EITHER_ENABLED(pVM, INSTR_STR)
8710 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
8711 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
8712 {
8713 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
8714 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
8715 }
8716 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
8717 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
8718 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
8719 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
8720
8721 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
8722 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
8723 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
8724 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
8725 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
8726 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
8727 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
8728 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
8729 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
8730 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
8731 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
8732 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
8733 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
8734 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
8735 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
8736 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
8737 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
8738 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
8739 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
8740    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
8741 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
8742 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
8743
8744#undef IS_EITHER_ENABLED
8745#undef SET_ONLY_XBM_IF_EITHER_EN
8746#undef SET_CPE1_XBM_IF_EITHER_EN
8747#undef SET_CPEU_XBM_IF_EITHER_EN
8748#undef SET_CPE2_XBM_IF_EITHER_EN
8749
8750 /*
8751     * Sanitize the requested execution-control changes against what the CPU supports.
8752 */
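    /* Keep only extra control bits the CPU reports as allowed-1 (settable), and only
       unwanted bits that are not fixed to 1 by the allowed-0 settings. */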
8753 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
8754 if (pDbgState->fCpe2Extra)
8755 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
8756 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
8757 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
8758 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
8759 {
8760 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
8761 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
8762 }
8763
8764 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
8765 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
8766 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
8767 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
8768}
8769
8770
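/*
 * For reference: the VBOXVMM_* macros used in the handler below are static dtrace
 * probes.  On a host with dtrace available they can presumably be consumed with a
 * one-liner along these lines (the provider/probe spelling is an assumption based
 * on the usual PROVIDER_PROBE__NAME to provider:::probe-name mapping):
 *
 *   dtrace -n 'vboxvmm*:::exit-cpuid { printf("cpuid leaf %x", arg2); }'
 */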
8771/**
8772 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
8773 * appropriate.
8774 *
8775 * The caller has already checked the VM-exit against the
8776 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has also checked for NMIs,
8777 * so neither needs to be done here.
8778 *
8779 * @returns Strict VBox status code (i.e. informational status codes too).
8780 * @param pVCpu The cross context virtual CPU structure.
8781 * @param pVmxTransient The VMX-transient structure.
8782 * @param uExitReason The VM-exit reason.
8783 *
8784 * @remarks The name of this function is displayed by dtrace, so keep it short
8785 *          and to the point. No longer than 33 chars, please.
8786 */
8787static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
8788{
8789 /*
8790 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
8791 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
8792 *
8793 * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
8794     * does. Additions, changes and removals must be made in both places, in the same order.
8795 *
8796 * Added/removed events must also be reflected in the next section
8797 * where we dispatch dtrace events.
8798 */
8799 bool fDtrace1 = false;
8800 bool fDtrace2 = false;
8801 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
8802 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
8803 uint32_t uEventArg = 0;
8804#define SET_EXIT(a_EventSubName) \
8805 do { \
8806 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
8807 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
8808 } while (0)
8809#define SET_BOTH(a_EventSubName) \
8810 do { \
8811 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
8812 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
8813 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
8814 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
8815 } while (0)
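    /* SET_EXIT() maps the VM-exit to its DBGF exit event and latches whether the
       matching dtrace exit probe is enabled; SET_BOTH() does the same for the
       instruction event/probe pair as well. */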
8816 switch (uExitReason)
8817 {
8818 case VMX_EXIT_MTF:
8819 return vmxHCExitMtf(pVCpu, pVmxTransient);
8820
8821 case VMX_EXIT_XCPT_OR_NMI:
8822 {
8823 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
8824 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
8825 {
8826 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
8827 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
8828 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
8829 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
8830 {
8831 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
8832 {
8833 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
8834 uEventArg = pVmxTransient->uExitIntErrorCode;
8835 }
8836 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
8837 switch (enmEvent1)
8838 {
8839 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
8840 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
8841 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
8842 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
8843 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
8844 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
8845 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
8846 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
8847 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
8848 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
8849 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
8850 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
8851 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
8852 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
8853 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
8854 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
8855 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
8856 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
8857 default: break;
8858 }
8859 }
8860 else
8861 AssertFailed();
8862 break;
8863
8864 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
8865 uEventArg = idxVector;
8866 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
8867 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
8868 break;
8869 }
8870 break;
8871 }
8872
8873 case VMX_EXIT_TRIPLE_FAULT:
8874 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
8875 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
8876 break;
8877 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
8878 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
8879 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
8880 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
8881 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
8882
8883 /* Instruction specific VM-exits: */
8884 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
8885 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
8886 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
8887 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
8888 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
8889 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
8890 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
8891 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
8892 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
8893 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
8894 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
8895 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
8896 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
8897 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
8898 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
8899 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
8900 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
8901 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
8902 case VMX_EXIT_MOV_CRX:
8903 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8904 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
8905 SET_BOTH(CRX_READ);
8906 else
8907 SET_BOTH(CRX_WRITE);
8908 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
8909 break;
8910 case VMX_EXIT_MOV_DRX:
8911 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8912 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
8913 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
8914 SET_BOTH(DRX_READ);
8915 else
8916 SET_BOTH(DRX_WRITE);
8917 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
8918 break;
8919 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
8920 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
8921 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
8922 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
8923 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
8924 case VMX_EXIT_GDTR_IDTR_ACCESS:
8925 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8926 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
8927 {
8928 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
8929 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
8930 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
8931 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
8932 }
8933 break;
8934
8935 case VMX_EXIT_LDTR_TR_ACCESS:
8936 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
8937 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
8938 {
8939 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
8940 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
8941 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
8942 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
8943 }
8944 break;
8945
8946 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
8947 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
8948 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
8949 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
8950 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
8951 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
8952 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
8953 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
8954 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
8955 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
8956 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
8957
8958 /* Events that aren't relevant at this point. */
8959 case VMX_EXIT_EXT_INT:
8960 case VMX_EXIT_INT_WINDOW:
8961 case VMX_EXIT_NMI_WINDOW:
8962 case VMX_EXIT_TPR_BELOW_THRESHOLD:
8963 case VMX_EXIT_PREEMPT_TIMER:
8964 case VMX_EXIT_IO_INSTR:
8965 break;
8966
8967 /* Errors and unexpected events. */
8968 case VMX_EXIT_INIT_SIGNAL:
8969 case VMX_EXIT_SIPI:
8970 case VMX_EXIT_IO_SMI:
8971 case VMX_EXIT_SMI:
8972 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
8973 case VMX_EXIT_ERR_MSR_LOAD:
8974 case VMX_EXIT_ERR_MACHINE_CHECK:
8975 case VMX_EXIT_PML_FULL:
8976 case VMX_EXIT_VIRTUALIZED_EOI:
8977 break;
8978
8979 default:
8980 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
8981 break;
8982 }
8983#undef SET_BOTH
8984#undef SET_EXIT
8985
8986 /*
8987     * Dtrace tracepoints go first. We do them all here at once so we don't
8988     * have to repeat the guest-state saving and related boilerplate a few dozen
8989     * times. The downside is that we've got to repeat the switch, though this
8990     * time we use enmEvent since the probes are a subset of what DBGF does.
8991 */
8992 if (fDtrace1 || fDtrace2)
8993 {
8994 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
8995 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8996 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8997 switch (enmEvent1)
8998 {
8999 /** @todo consider which extra parameters would be helpful for each probe. */
9000 case DBGFEVENT_END: break;
9001 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
9002 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
9003 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
9004 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
9005 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
9006 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
9007 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
9008 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
9009 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
9010 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
9011 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
9012 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
9013 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
9014 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
9015 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
9016 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
9017 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
9018 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
9019 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9020 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
9021 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
9022 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
9023 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
9024 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
9025 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
9026 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
9027 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
9028 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9029 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9030 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9031 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9032 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
9033 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
9034 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
9035 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
9036 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
9037 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
9038 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
9039 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
9040 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
9041 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
9042 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
9043 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
9044 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
9045 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
9046 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
9047 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
9048 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
9049 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
9050 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
9051 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
9052 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
9053 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
9054 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
9055 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
9056 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
9057 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
9058 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
9059 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
9060 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
9061 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
9062 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
9063 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
9064 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
9065 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
9066 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
9067 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
9068 }
9069 switch (enmEvent2)
9070 {
9071 /** @todo consider which extra parameters would be helpful for each probe. */
9072 case DBGFEVENT_END: break;
9073 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
9074 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
9075 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
9076 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
9077 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
9078 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
9079 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
9080 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
9081 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
9082 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9083 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9084 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
9085 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
9086 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
9087 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
9088 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
9089 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
9090 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
9091 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
9092 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
9093 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
9094 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
9095 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
9096 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
9097 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
9098 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
9099 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
9100 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
9101 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
9102 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
9103 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
9104 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
9105 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
9106 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
9107 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
9108 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
9109 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
9110 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
9111 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
9112 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
9113 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
9114 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
9115 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
9116 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
9117 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
9118 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
9119 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
9120 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
9121 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
9122 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
9123 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
9124 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
9125 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
9126 }
9127 }
9128
9129 /*
9130     * Fire off the DBGF event, if enabled (our check here is just a quick one,
9131 * the DBGF call will do a full check).
9132 *
9133 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
9134     * Note! If we have two events, we prioritize the first, i.e. the instruction
9135 * one, in order to avoid event nesting.
9136 */
9137 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9138 if ( enmEvent1 != DBGFEVENT_END
9139 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
9140 {
9141 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9142 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
9143 if (rcStrict != VINF_SUCCESS)
9144 return rcStrict;
9145 }
9146 else if ( enmEvent2 != DBGFEVENT_END
9147 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
9148 {
9149 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9150 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
9151 if (rcStrict != VINF_SUCCESS)
9152 return rcStrict;
9153 }
9154
9155 return VINF_SUCCESS;
9156}
9157
9158
9159/**
9160 * Single-stepping VM-exit filtering.
9161 *
9162 * This preprocesses the VM-exits and decides whether we've gotten far
9163 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
9164 * handling is performed.
9165 *
9166 * @returns Strict VBox status code (i.e. informational status codes too).
9167 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9168 * @param pVmxTransient The VMX-transient structure.
9169 * @param pDbgState The debug state.
9170 */
9171DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
9172{
9173 /*
9174 * Expensive (saves context) generic dtrace VM-exit probe.
9175 */
9176 uint32_t const uExitReason = pVmxTransient->uExitReason;
9177 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
9178 { /* more likely */ }
9179 else
9180 {
9181 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
9182 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
9183 AssertRC(rc);
9184 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
9185 }
9186
9187#ifdef IN_RING0 /* NMIs should never reach R3. */
9188 /*
9189 * Check for host NMI, just to get that out of the way.
9190 */
9191 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
9192 { /* normally likely */ }
9193 else
9194 {
9195 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
9196 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
9197 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
9198 return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
9199 }
9200#endif
9201
9202 /*
9203 * Check for single stepping event if we're stepping.
9204 */
9205 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
9206 {
9207 switch (uExitReason)
9208 {
9209 case VMX_EXIT_MTF:
9210 return vmxHCExitMtf(pVCpu, pVmxTransient);
9211
9212 /* Various events: */
9213 case VMX_EXIT_XCPT_OR_NMI:
9214 case VMX_EXIT_EXT_INT:
9215 case VMX_EXIT_TRIPLE_FAULT:
9216 case VMX_EXIT_INT_WINDOW:
9217 case VMX_EXIT_NMI_WINDOW:
9218 case VMX_EXIT_TASK_SWITCH:
9219 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9220 case VMX_EXIT_APIC_ACCESS:
9221 case VMX_EXIT_EPT_VIOLATION:
9222 case VMX_EXIT_EPT_MISCONFIG:
9223 case VMX_EXIT_PREEMPT_TIMER:
9224
9225 /* Instruction specific VM-exits: */
9226 case VMX_EXIT_CPUID:
9227 case VMX_EXIT_GETSEC:
9228 case VMX_EXIT_HLT:
9229 case VMX_EXIT_INVD:
9230 case VMX_EXIT_INVLPG:
9231 case VMX_EXIT_RDPMC:
9232 case VMX_EXIT_RDTSC:
9233 case VMX_EXIT_RSM:
9234 case VMX_EXIT_VMCALL:
9235 case VMX_EXIT_VMCLEAR:
9236 case VMX_EXIT_VMLAUNCH:
9237 case VMX_EXIT_VMPTRLD:
9238 case VMX_EXIT_VMPTRST:
9239 case VMX_EXIT_VMREAD:
9240 case VMX_EXIT_VMRESUME:
9241 case VMX_EXIT_VMWRITE:
9242 case VMX_EXIT_VMXOFF:
9243 case VMX_EXIT_VMXON:
9244 case VMX_EXIT_MOV_CRX:
9245 case VMX_EXIT_MOV_DRX:
9246 case VMX_EXIT_IO_INSTR:
9247 case VMX_EXIT_RDMSR:
9248 case VMX_EXIT_WRMSR:
9249 case VMX_EXIT_MWAIT:
9250 case VMX_EXIT_MONITOR:
9251 case VMX_EXIT_PAUSE:
9252 case VMX_EXIT_GDTR_IDTR_ACCESS:
9253 case VMX_EXIT_LDTR_TR_ACCESS:
9254 case VMX_EXIT_INVEPT:
9255 case VMX_EXIT_RDTSCP:
9256 case VMX_EXIT_INVVPID:
9257 case VMX_EXIT_WBINVD:
9258 case VMX_EXIT_XSETBV:
9259 case VMX_EXIT_RDRAND:
9260 case VMX_EXIT_INVPCID:
9261 case VMX_EXIT_VMFUNC:
9262 case VMX_EXIT_RDSEED:
9263 case VMX_EXIT_XSAVES:
9264 case VMX_EXIT_XRSTORS:
9265 {
9266 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9267 AssertRCReturn(rc, rc);
9268 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
9269 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
9270 return VINF_EM_DBG_STEPPED;
9271 break;
9272 }
9273
9274 /* Errors and unexpected events: */
9275 case VMX_EXIT_INIT_SIGNAL:
9276 case VMX_EXIT_SIPI:
9277 case VMX_EXIT_IO_SMI:
9278 case VMX_EXIT_SMI:
9279 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9280 case VMX_EXIT_ERR_MSR_LOAD:
9281 case VMX_EXIT_ERR_MACHINE_CHECK:
9282 case VMX_EXIT_PML_FULL:
9283 case VMX_EXIT_VIRTUALIZED_EOI:
9284            case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
9285 break;
9286
9287 default:
9288 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
9289 break;
9290 }
9291 }
9292
9293 /*
9294 * Check for debugger event breakpoints and dtrace probes.
9295 */
9296 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
9297 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
9298 {
9299 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
9300 if (rcStrict != VINF_SUCCESS)
9301 return rcStrict;
9302 }
9303
9304 /*
9305 * Normal processing.
9306 */
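    /* Dispatch to the regular VM-exit handler: either through the per-exit function
       table or the switch-based vmxHCHandleExit(), depending on the build configuration. */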
9307#ifdef HMVMX_USE_FUNCTION_TABLE
9308 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
9309#else
9310 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
9311#endif
9312}
9313
9314
9315/**
9316 * Single steps guest code using hardware-assisted VMX.
9317 *
9318 * This is -not- the same as the guest single-stepping itself (say using EFLAGS.TF)
9319 * but rather single-stepping through the hypervisor debugger.
9320 *
9321 * @returns Strict VBox status code (i.e. informational status codes too).
9322 * @param pVCpu The cross context virtual CPU structure.
9323 * @param pcLoops Pointer to the number of executed loops.
9324 *
9325 * @note Mostly the same as vmxHCRunGuestCodeNormal().
9326 */
9327static VBOXSTRICTRC vmxHCRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
9328{
9329 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
9330 Assert(pcLoops);
9331 Assert(*pcLoops <= cMaxResumeLoops);
9332
9333 VMXTRANSIENT VmxTransient;
9334 RT_ZERO(VmxTransient);
9335 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
9336
9337 /* Set HMCPU indicators. */
9338 bool const fSavedSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
9339 VCPU_2_VMXSTATE(pVCpu).fSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction || DBGFIsStepping(pVCpu);
9340 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
9341 pVCpu->hmr0.s.fUsingDebugLoop = true;
9342
9343 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
9344 VMXRUNDBGSTATE DbgState;
9345 vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
9346 vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
9347
9348 /*
9349 * The loop.
9350 */
9351 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
9352 for (;;)
9353 {
9354 Assert(!HMR0SuspendPending());
9355 HMVMX_ASSERT_CPU_SAFE(pVCpu);
9356 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
9357 bool fStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
9358
9359 /* Set up VM-execution controls the next two can respond to. */
9360 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
9361
9362 /*
9363         * Preparatory work for running guest code; this may force us to
9364         * return to ring-3.
9365 *
9366 * Warning! This bugger disables interrupts on VINF_SUCCESS!
9367 */
9368 rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, fStepping);
9369 if (rcStrict != VINF_SUCCESS)
9370 break;
9371
9372 /* Interrupts are disabled at this point! */
9373 vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
9374
9375 /* Override any obnoxious code in the above two calls. */
9376 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
9377
9378 /*
9379 * Finally execute the guest.
9380 */
9381 int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
9382
9383 vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
9384 /* Interrupts are re-enabled at this point! */
9385
9386 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
9387 if (RT_SUCCESS(rcRun))
9388 { /* very likely */ }
9389 else
9390 {
9391 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
9392 vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
9393 return rcRun;
9394 }
9395
9396 /* Profile the VM-exit. */
9397 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
9398 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
9399 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9400 STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
9401 HMVMX_START_EXIT_DISPATCH_PROF();
9402
9403 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
9404
9405 /*
9406         * Handle the VM-exit - we quit earlier on certain VM-exits, see vmxHCRunDebugHandleExit().
9407 */
9408 rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
9409 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
9410 if (rcStrict != VINF_SUCCESS)
9411 break;
9412 if (++(*pcLoops) > cMaxResumeLoops)
9413 {
9414 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
9415 rcStrict = VINF_EM_RAW_INTERRUPT;
9416 break;
9417 }
9418
9419 /*
9420         * Stepping: Did the RIP change? If so, consider it a single step.
9421 * Otherwise, make sure one of the TFs gets set.
9422 */
9423 if (fStepping)
9424 {
9425 int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9426 AssertRC(rc);
9427 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
9428 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
9429 {
9430 rcStrict = VINF_EM_DBG_STEPPED;
9431 break;
9432 }
9433 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
9434 }
9435
9436 /*
9437         * Update when the dtrace settings change (DBGF kicks us, so no need to check).
9438 */
9439 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
9440 vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
9441
9442 /* Restore all controls applied by vmxHCPreRunGuestDebugStateApply above. */
9443 rcStrict = vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
9444 Assert(rcStrict == VINF_SUCCESS);
9445 }
9446
9447 /*
9448 * Clear the X86_EFL_TF if necessary.
9449 */
9450 if (pVCpu->hmr0.s.fClearTrapFlag)
9451 {
9452 int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
9453 AssertRC(rc);
9454 pVCpu->hmr0.s.fClearTrapFlag = false;
9455 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
9456 }
9457    /** @todo there seem to be issues with the resume flag when the monitor trap
9458     * flag is pending without being used. Seen early in BIOS init when
9459     * accessing the APIC page in protected mode. */
9460
9461 /* Restore HMCPU indicators. */
9462 pVCpu->hmr0.s.fUsingDebugLoop = false;
9463 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
9464 VCPU_2_VMXSTATE(pVCpu).fSingleInstruction = fSavedSingleInstruction;
9465
9466 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
9467 return rcStrict;
9468}
9469#endif
9470
9471/** @} */
9472
9473
9474#ifndef HMVMX_USE_FUNCTION_TABLE
9475/**
9476 * Handles a guest VM-exit from hardware-assisted VMX execution.
9477 *
9478 * @returns Strict VBox status code (i.e. informational status codes too).
9479 * @param pVCpu The cross context virtual CPU structure.
9480 * @param pVmxTransient The VMX-transient structure.
9481 */
9482DECLINLINE(VBOXSTRICTRC) vmxHCHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9483{
9484#ifdef DEBUG_ramshankar
9485# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
9486 do { \
9487 if (a_fSave != 0) \
9488 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
9489 VBOXSTRICTRC rcStrict = a_CallExpr; \
9490 if (a_fSave != 0) \
9491 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
9492 return rcStrict; \
9493 } while (0)
9494#else
9495# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
9496#endif
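    /* Note: the DEBUG_ramshankar variant above imports the entire guest state before
       invoking the handler and marks it all dirty afterwards (when a_fSave is non-zero);
       the regular build simply tail-calls the handler. */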
9497 uint32_t const uExitReason = pVmxTransient->uExitReason;
9498 switch (uExitReason)
9499 {
9500 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, vmxHCExitEptMisconfig(pVCpu, pVmxTransient));
9501 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, vmxHCExitEptViolation(pVCpu, pVmxTransient));
9502 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, vmxHCExitIoInstr(pVCpu, pVmxTransient));
9503 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, vmxHCExitCpuid(pVCpu, pVmxTransient));
9504 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, vmxHCExitRdtsc(pVCpu, pVmxTransient));
9505 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, vmxHCExitRdtscp(pVCpu, pVmxTransient));
9506 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, vmxHCExitApicAccess(pVCpu, pVmxTransient));
9507 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, vmxHCExitXcptOrNmi(pVCpu, pVmxTransient));
9508 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, vmxHCExitMovCRx(pVCpu, pVmxTransient));
9509 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, vmxHCExitExtInt(pVCpu, pVmxTransient));
9510 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, vmxHCExitIntWindow(pVCpu, pVmxTransient));
9511 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient));
9512 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, vmxHCExitMwait(pVCpu, pVmxTransient));
9513 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, vmxHCExitMonitor(pVCpu, pVmxTransient));
9514 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, vmxHCExitTaskSwitch(pVCpu, pVmxTransient));
9515 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, vmxHCExitPreemptTimer(pVCpu, pVmxTransient));
9516 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, vmxHCExitRdmsr(pVCpu, pVmxTransient));
9517 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, vmxHCExitWrmsr(pVCpu, pVmxTransient));
9518 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, vmxHCExitVmcall(pVCpu, pVmxTransient));
9519 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, vmxHCExitMovDRx(pVCpu, pVmxTransient));
9520 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, vmxHCExitHlt(pVCpu, pVmxTransient));
9521 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, vmxHCExitInvd(pVCpu, pVmxTransient));
9522 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, vmxHCExitInvlpg(pVCpu, pVmxTransient));
9523 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, vmxHCExitMtf(pVCpu, pVmxTransient));
9524 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, vmxHCExitPause(pVCpu, pVmxTransient));
9525 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, vmxHCExitWbinvd(pVCpu, pVmxTransient));
9526 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, vmxHCExitXsetbv(pVCpu, pVmxTransient));
9527 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, vmxHCExitInvpcid(pVCpu, pVmxTransient));
9528 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, vmxHCExitGetsec(pVCpu, pVmxTransient));
9529 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, vmxHCExitRdpmc(pVCpu, pVmxTransient));
9530#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9531 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, vmxHCExitVmclear(pVCpu, pVmxTransient));
9532 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, vmxHCExitVmlaunch(pVCpu, pVmxTransient));
9533 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, vmxHCExitVmptrld(pVCpu, pVmxTransient));
9534 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, vmxHCExitVmptrst(pVCpu, pVmxTransient));
9535 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, vmxHCExitVmread(pVCpu, pVmxTransient));
9536        case VMX_EXIT_VMRESUME:               VMEXIT_CALL_RET(0, vmxHCExitVmresume(pVCpu, pVmxTransient));
9537        case VMX_EXIT_VMWRITE:                VMEXIT_CALL_RET(0, vmxHCExitVmwrite(pVCpu, pVmxTransient));
9538 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, vmxHCExitVmxoff(pVCpu, pVmxTransient));
9539 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, vmxHCExitVmxon(pVCpu, pVmxTransient));
9540 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, vmxHCExitInvvpid(pVCpu, pVmxTransient));
9541 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient));
9542#else
9543 case VMX_EXIT_VMCLEAR:
9544 case VMX_EXIT_VMLAUNCH:
9545 case VMX_EXIT_VMPTRLD:
9546 case VMX_EXIT_VMPTRST:
9547 case VMX_EXIT_VMREAD:
9548 case VMX_EXIT_VMRESUME:
9549 case VMX_EXIT_VMWRITE:
9550 case VMX_EXIT_VMXOFF:
9551 case VMX_EXIT_VMXON:
9552 case VMX_EXIT_INVVPID:
9553 case VMX_EXIT_INVEPT:
9554 return vmxHCExitSetPendingXcptUD(pVCpu, pVmxTransient);
9555#endif
9556
9557 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFault(pVCpu, pVmxTransient);
9558 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
9559 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
9560
9561 case VMX_EXIT_INIT_SIGNAL:
9562 case VMX_EXIT_SIPI:
9563 case VMX_EXIT_IO_SMI:
9564 case VMX_EXIT_SMI:
9565 case VMX_EXIT_ERR_MSR_LOAD:
9566 case VMX_EXIT_ERR_MACHINE_CHECK:
9567 case VMX_EXIT_PML_FULL:
9568 case VMX_EXIT_VIRTUALIZED_EOI:
9569 case VMX_EXIT_GDTR_IDTR_ACCESS:
9570 case VMX_EXIT_LDTR_TR_ACCESS:
9571 case VMX_EXIT_APIC_WRITE:
9572 case VMX_EXIT_RDRAND:
9573 case VMX_EXIT_RSM:
9574 case VMX_EXIT_VMFUNC:
9575 case VMX_EXIT_ENCLS:
9576 case VMX_EXIT_RDSEED:
9577 case VMX_EXIT_XSAVES:
9578 case VMX_EXIT_XRSTORS:
9579 case VMX_EXIT_UMWAIT:
9580 case VMX_EXIT_TPAUSE:
9581 case VMX_EXIT_LOADIWKEY:
9582 default:
9583 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
9584 }
9585#undef VMEXIT_CALL_RET
9586}
9587#endif /* !HMVMX_USE_FUNCTION_TABLE */
9588
9589
9590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9591/**
9592 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
9593 *
9594 * @returns Strict VBox status code (i.e. informational status codes too).
9595 * @param pVCpu The cross context virtual CPU structure.
9596 * @param pVmxTransient The VMX-transient structure.
9597 */
9598DECLINLINE(VBOXSTRICTRC) vmxHCHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9599{
9600 uint32_t const uExitReason = pVmxTransient->uExitReason;
9601 switch (uExitReason)
9602 {
9603 case VMX_EXIT_EPT_MISCONFIG: return vmxHCExitEptMisconfig(pVCpu, pVmxTransient);
9604 case VMX_EXIT_EPT_VIOLATION: return vmxHCExitEptViolation(pVCpu, pVmxTransient);
9605 case VMX_EXIT_XCPT_OR_NMI: return vmxHCExitXcptOrNmiNested(pVCpu, pVmxTransient);
9606 case VMX_EXIT_IO_INSTR: return vmxHCExitIoInstrNested(pVCpu, pVmxTransient);
9607 case VMX_EXIT_HLT: return vmxHCExitHltNested(pVCpu, pVmxTransient);
9608
9609 /*
9610 * We shouldn't direct host physical interrupts to the nested-guest.
9611 */
9612 case VMX_EXIT_EXT_INT:
9613 return vmxHCExitExtInt(pVCpu, pVmxTransient);
9614
9615 /*
9616         * Instructions that cause VM-exits unconditionally or where the condition
9617         * is always taken solely from the nested hypervisor (meaning if the VM-exit
9618 * happens, it's guaranteed to be a nested-guest VM-exit).
9619 *
9620 * - Provides VM-exit instruction length ONLY.
9621 */
9622 case VMX_EXIT_CPUID: /* Unconditional. */
9623 case VMX_EXIT_VMCALL:
9624 case VMX_EXIT_GETSEC:
9625 case VMX_EXIT_INVD:
9626 case VMX_EXIT_XSETBV:
9627 case VMX_EXIT_VMLAUNCH:
9628 case VMX_EXIT_VMRESUME:
9629 case VMX_EXIT_VMXOFF:
9630 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
9631 case VMX_EXIT_VMFUNC:
9632 return vmxHCExitInstrNested(pVCpu, pVmxTransient);
9633
9634 /*
9635         * Instructions that cause VM-exits unconditionally or where the condition
9636         * is always taken solely from the nested hypervisor (meaning if the VM-exit
9637 * happens, it's guaranteed to be a nested-guest VM-exit).
9638 *
9639 * - Provides VM-exit instruction length.
9640 * - Provides VM-exit information.
9641 * - Optionally provides Exit qualification.
9642 *
9643 * Since Exit qualification is 0 for all VM-exits where it is not
9644 * applicable, reading and passing it to the guest should produce
9645 * defined behavior.
9646 *
9647 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
9648 */
9649 case VMX_EXIT_INVEPT: /* Unconditional. */
9650 case VMX_EXIT_INVVPID:
9651 case VMX_EXIT_VMCLEAR:
9652 case VMX_EXIT_VMPTRLD:
9653 case VMX_EXIT_VMPTRST:
9654 case VMX_EXIT_VMXON:
9655 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
9656 case VMX_EXIT_LDTR_TR_ACCESS:
9657 case VMX_EXIT_RDRAND:
9658 case VMX_EXIT_RDSEED:
9659 case VMX_EXIT_XSAVES:
9660 case VMX_EXIT_XRSTORS:
9661 case VMX_EXIT_UMWAIT:
9662 case VMX_EXIT_TPAUSE:
9663 return vmxHCExitInstrWithInfoNested(pVCpu, pVmxTransient);
9664
9665 case VMX_EXIT_RDTSC: return vmxHCExitRdtscNested(pVCpu, pVmxTransient);
9666 case VMX_EXIT_RDTSCP: return vmxHCExitRdtscpNested(pVCpu, pVmxTransient);
9667 case VMX_EXIT_RDMSR: return vmxHCExitRdmsrNested(pVCpu, pVmxTransient);
9668 case VMX_EXIT_WRMSR: return vmxHCExitWrmsrNested(pVCpu, pVmxTransient);
9669 case VMX_EXIT_INVLPG: return vmxHCExitInvlpgNested(pVCpu, pVmxTransient);
9670 case VMX_EXIT_INVPCID: return vmxHCExitInvpcidNested(pVCpu, pVmxTransient);
9671 case VMX_EXIT_TASK_SWITCH: return vmxHCExitTaskSwitchNested(pVCpu, pVmxTransient);
9672 case VMX_EXIT_WBINVD: return vmxHCExitWbinvdNested(pVCpu, pVmxTransient);
9673 case VMX_EXIT_MTF: return vmxHCExitMtfNested(pVCpu, pVmxTransient);
9674 case VMX_EXIT_APIC_ACCESS: return vmxHCExitApicAccessNested(pVCpu, pVmxTransient);
9675 case VMX_EXIT_APIC_WRITE: return vmxHCExitApicWriteNested(pVCpu, pVmxTransient);
9676 case VMX_EXIT_VIRTUALIZED_EOI: return vmxHCExitVirtEoiNested(pVCpu, pVmxTransient);
9677 case VMX_EXIT_MOV_CRX: return vmxHCExitMovCRxNested(pVCpu, pVmxTransient);
9678 case VMX_EXIT_INT_WINDOW: return vmxHCExitIntWindowNested(pVCpu, pVmxTransient);
9679 case VMX_EXIT_NMI_WINDOW: return vmxHCExitNmiWindowNested(pVCpu, pVmxTransient);
9680 case VMX_EXIT_TPR_BELOW_THRESHOLD: return vmxHCExitTprBelowThresholdNested(pVCpu, pVmxTransient);
9681 case VMX_EXIT_MWAIT: return vmxHCExitMwaitNested(pVCpu, pVmxTransient);
9682 case VMX_EXIT_MONITOR: return vmxHCExitMonitorNested(pVCpu, pVmxTransient);
9683 case VMX_EXIT_PAUSE: return vmxHCExitPauseNested(pVCpu, pVmxTransient);
9684
9685 case VMX_EXIT_PREEMPT_TIMER:
9686 {
9687 /** @todo NSTVMX: Preempt timer. */
9688 return vmxHCExitPreemptTimer(pVCpu, pVmxTransient);
9689 }
9690
9691 case VMX_EXIT_MOV_DRX: return vmxHCExitMovDRxNested(pVCpu, pVmxTransient);
9692 case VMX_EXIT_RDPMC: return vmxHCExitRdpmcNested(pVCpu, pVmxTransient);
9693
9694 case VMX_EXIT_VMREAD:
9695 case VMX_EXIT_VMWRITE: return vmxHCExitVmreadVmwriteNested(pVCpu, pVmxTransient);
9696
9697 case VMX_EXIT_TRIPLE_FAULT: return vmxHCExitTripleFaultNested(pVCpu, pVmxTransient);
9698 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return vmxHCExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
9699
9700 case VMX_EXIT_INIT_SIGNAL:
9701 case VMX_EXIT_SIPI:
9702 case VMX_EXIT_IO_SMI:
9703 case VMX_EXIT_SMI:
9704 case VMX_EXIT_ERR_MSR_LOAD:
9705 case VMX_EXIT_ERR_MACHINE_CHECK:
9706 case VMX_EXIT_PML_FULL:
9707 case VMX_EXIT_RSM:
9708 default:
9709 return vmxHCExitErrUnexpected(pVCpu, pVmxTransient);
9710 }
9711}
9712#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9713
9714
9715/** @name VM-exit helpers.
9716 * @{
9717 */
9718/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9719/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
9720/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9721
9722/** Macro for VM-exits called unexpectedly. */
9723#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
9724 do { \
9725 VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
9726 return VERR_VMX_UNEXPECTED_EXIT; \
9727 } while (0)
9728
9729#ifdef VBOX_STRICT
9730# ifdef IN_RING0
9731/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
9732# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
9733 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
9734
9735# define HMVMX_ASSERT_PREEMPT_CPUID() \
9736 do { \
9737 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
9738 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
9739 } while (0)
9740
9741# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
9742 do { \
9743 AssertPtr((a_pVCpu)); \
9744 AssertPtr((a_pVmxTransient)); \
9745 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
9746 Assert((a_pVmxTransient)->pVmcsInfo); \
9747 Assert(ASMIntAreEnabled()); \
9748 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
9749 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
9750 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
9751 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
9752 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
9753 HMVMX_ASSERT_PREEMPT_CPUID(); \
9754 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9755 } while (0)
9756# else
9757# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() do { } while(0)
9758# define HMVMX_ASSERT_PREEMPT_CPUID() do { } while(0)
9759# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
9760 do { \
9761 AssertPtr((a_pVCpu)); \
9762 AssertPtr((a_pVmxTransient)); \
9763 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
9764 Assert((a_pVmxTransient)->pVmcsInfo); \
9765 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
9766 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9767 } while (0)
9768# endif
9769
9770# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
9771 do { \
9772 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
9773 Assert((a_pVmxTransient)->fIsNestedGuest); \
9774 } while (0)
9775
9776# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
9777 do { \
9778 Log4Func(("\n")); \
9779 } while (0)
9780#else
9781# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
9782 do { \
9783 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9784 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
9785 } while (0)
9786
9787# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
9788 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
9789
9790# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
9791#endif
9792
9793#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9794/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
9795 * guests that attempted to execute a VMX instruction. */
9796# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
9797 do \
9798 { \
9799 VBOXSTRICTRC rcStrictTmp = vmxHCCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
9800 if (rcStrictTmp == VINF_SUCCESS) \
9801 { /* likely */ } \
9802 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
9803 { \
9804 Assert((a_pVCpu)->hm.s.Event.fPending); \
9805 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
9806 return VINF_SUCCESS; \
9807 } \
9808 else \
9809 { \
9810 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
9811 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
9812 } \
9813 } while (0)
9814
9815/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
9816# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
9817 do \
9818 { \
9819 VBOXSTRICTRC rcStrictTmp = vmxHCDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
9820 (a_pGCPtrEffAddr)); \
9821 if (rcStrictTmp == VINF_SUCCESS) \
9822 { /* likely */ } \
9823 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
9824 { \
9825 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
9826 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
9827 NOREF(uXcptTmp); \
9828 return VINF_SUCCESS; \
9829 } \
9830 else \
9831 { \
9832 Log4Func(("vmxHCDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
9833 return rcStrictTmp; \
9834 } \
9835 } while (0)
9836#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9837
9838
9839/**
9840 * Advances the guest RIP by the specified number of bytes.
9841 *
9842 * @param pVCpu The cross context virtual CPU structure.
9843 * @param cbInstr Number of bytes to advance the RIP by.
9844 *
9845 * @remarks No-long-jump zone!!!
9846 */
9847DECLINLINE(void) vmxHCAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
9848{
9849 /* Advance the RIP. */
9850 pVCpu->cpum.GstCtx.rip += cbInstr;
9851 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
9852
9853    /* Update interrupt inhibition: once RIP has moved past the instruction that set it (STI, MOV SS, POP SS), clear the force flag. */
9854 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9855 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
9856 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
9857}
9858
9859
9860/**
9861 * Advances the guest RIP after reading it from the VMCS.
9862 *
9863 * @returns VBox status code, no informational status codes.
9864 * @param pVCpu The cross context virtual CPU structure.
9865 * @param pVmxTransient The VMX-transient structure.
9866 *
9867 * @remarks No-long-jump zone!!!
9868 */
9869static int vmxHCAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9870{
9871 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9872 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
9873 AssertRCReturn(rc, rc);
9874
9875 vmxHCAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
9876 return VINF_SUCCESS;
9877}
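/*
 * Illustrative usage sketch (not part of the build): VM-exit handlers that merely need to
 * skip the exiting instruction follow this pattern (see e.g. the WBINVD/INVD handlers
 * further down); the handler name here is a placeholder:
 *
 *   HMVMX_EXIT_NSRC_DECL vmxHCExitSomething(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 *   {
 *       HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
 *       return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
 *   }
 */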
9878
9879
9880/**
9881 * Handle a condition that occurred while delivering an event through the guest or
9882 * nested-guest IDT.
9883 *
9884 * @returns Strict VBox status code (i.e. informational status codes too).
9885 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
9886 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
9887 * to continue execution of the guest which will deliver the \#DF.
9888 * @retval VINF_EM_RESET if we detected a triple-fault condition.
9889 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
9890 *
9891 * @param pVCpu The cross context virtual CPU structure.
9892 * @param pVmxTransient The VMX-transient structure.
9893 *
9894 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
9895 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
9896 * is due to an EPT violation, PML full or SPP-related event.
9897 *
9898 * @remarks No-long-jump zone!!!
9899 */
9900static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9901{
9902 Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
9903 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
9904 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
9905 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
9906 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
9907 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
9908
9909 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
9910 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9911 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
9912 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
9913 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
9914 {
9915 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
9916 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
9917
9918 /*
9919 * If the event was a software interrupt (generated with INT n) or a software exception
9920 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
9921 * can handle the VM-exit and continue guest execution, which will re-execute the
9922 * instruction rather than re-inject the exception. Re-injecting the exception can
9923 * cause premature trips to ring-3 before injection and involves TRPM, which currently
9924 * has no way of recording that these exceptions were caused by these instructions
9925 * (ICEBP's #DB poses the problem).
9926 */
9927 IEMXCPTRAISE enmRaise;
9928 IEMXCPTRAISEINFO fRaiseInfo;
9929 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
9930 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
9931 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
9932 {
9933 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
9934 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
9935 }
9936 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
9937 {
9938 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
9939 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
9940 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
9941
9942 uint32_t const fIdtVectorFlags = vmxHCGetIemXcptFlags(uIdtVector, uIdtVectorType);
9943 uint32_t const fExitVectorFlags = vmxHCGetIemXcptFlags(uExitVector, uExitVectorType);
9944
9945 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
9946
9947 /* Determine a vectoring #PF condition, see comment in vmxHCExitXcptPF(). */
9948 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
9949 {
9950 pVmxTransient->fVectoringPF = true;
9951 enmRaise = IEMXCPTRAISE_PREV_EVENT;
9952 }
9953 }
9954 else
9955 {
9956 /*
9957 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
9958 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
9959 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
9960 */
9961 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
9962 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
9963 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
9964 enmRaise = IEMXCPTRAISE_PREV_EVENT;
9965 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
9966 }
9967
9968 /*
9969 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
9970 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
9971 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
9972 * subsequent VM-entry would fail, see @bugref{7445}.
9973 *
9974 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
9975 */
9976 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
9977 && enmRaise == IEMXCPTRAISE_PREV_EVENT
9978 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
9979 && CPUMIsGuestNmiBlocking(pVCpu))
9980 {
9981 CPUMSetGuestNmiBlocking(pVCpu, false);
9982 }
9983
9984 switch (enmRaise)
9985 {
9986 case IEMXCPTRAISE_CURRENT_XCPT:
9987 {
9988 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
9989 Assert(rcStrict == VINF_SUCCESS);
9990 break;
9991 }
9992
9993 case IEMXCPTRAISE_PREV_EVENT:
9994 {
9995 uint32_t u32ErrCode;
9996 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
9997 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
9998 else
9999 u32ErrCode = 0;
10000
10001 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
10002 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflect);
10003 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
10004 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
10005
10006 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
10007 VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
10008 Assert(rcStrict == VINF_SUCCESS);
10009 break;
10010 }
10011
10012 case IEMXCPTRAISE_REEXEC_INSTR:
10013 Assert(rcStrict == VINF_SUCCESS);
10014 break;
10015
10016 case IEMXCPTRAISE_DOUBLE_FAULT:
10017 {
10018 /*
10019 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
10020 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
10021 */
10022 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
10023 {
10024 pVmxTransient->fVectoringDoublePF = true;
10025 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
10026 pVCpu->cpum.GstCtx.cr2));
10027 rcStrict = VINF_SUCCESS;
10028 }
10029 else
10030 {
10031 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectConvertDF);
10032 vmxHCSetPendingXcptDF(pVCpu);
10033 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
10034 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
10035 rcStrict = VINF_HM_DOUBLE_FAULT;
10036 }
10037 break;
10038 }
10039
10040 case IEMXCPTRAISE_TRIPLE_FAULT:
10041 {
10042 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
10043 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
10044 rcStrict = VINF_EM_RESET;
10045 break;
10046 }
10047
10048 case IEMXCPTRAISE_CPU_HANG:
10049 {
10050 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
10051 rcStrict = VERR_EM_GUEST_CPU_HANG;
10052 break;
10053 }
10054
10055 default:
10056 {
10057 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
10058 rcStrict = VERR_VMX_IPE_2;
10059 break;
10060 }
10061 }
10062 }
10063 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
10064 && !CPUMIsGuestNmiBlocking(pVCpu))
10065 {
10066 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
10067 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
10068 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
10069 {
10070 /*
10071 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
10072 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
10073 * that virtual NMIs remain blocked until the IRET execution is completed.
10074 *
10075 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
10076 */
10077 CPUMSetGuestNmiBlocking(pVCpu, true);
10078 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
10079 }
10080 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
10081 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
10082 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
10083 {
10084 /*
10085 * Execution of IRET caused an EPT violation, page-modification log-full event or
10086 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
10087 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
10088 * that virtual NMIs remain blocked until the IRET execution is completed.
10089 *
10090 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
10091 */
10092 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
10093 {
10094 CPUMSetGuestNmiBlocking(pVCpu, true);
10095 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
10096 }
10097 }
10098 }
10099
10100 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
10101 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
10102 return rcStrict;
10103}
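/*
 * Worked example of the reflection logic above: if the guest was delivering a hardware #PF
 * (IDT-vectoring type HW_XCPT, vector 14) and a second #PF was raised during that delivery,
 * IEMEvaluateRecursiveXcpt() reports IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF,
 * so fVectoringDoublePF is set and the second guest #PF is later converted into a #DF (see
 * vmxHCExitXcptPF below).
 */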
10104
10105
10106#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10107/**
10108 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
10109 * guest attempting to execute a VMX instruction.
10110 *
10111 * @returns Strict VBox status code (i.e. informational status codes too).
10112 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
10113 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
10114 *
10115 * @param pVCpu The cross context virtual CPU structure.
10116 * @param uExitReason The VM-exit reason.
10117 *
10118 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
10119 * @remarks No-long-jump zone!!!
10120 */
10121static VBOXSTRICTRC vmxHCCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
10122{
10123 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
10124 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
10125
10126 /*
10127 * The physical CPU would have already checked the CPU mode/code segment.
10128 * We shall just assert here for paranoia.
10129 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
10130 */
10131 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
10132 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
10133 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
10134
10135 if (uExitReason == VMX_EXIT_VMXON)
10136 {
10137 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
10138
10139 /*
10140 * We check CR4.VMXE because it is required to be always set while in VMX operation
10141 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
10142 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
10143 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
10144 */
10145 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
10146 {
10147 Log4Func(("CR4.VMXE is not set -> #UD\n"));
10148 vmxHCSetPendingXcptUD(pVCpu);
10149 return VINF_HM_PENDING_XCPT;
10150 }
10151 }
10152 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
10153 {
10154 /*
10155 * The guest has not entered VMX operation but attempted to execute a VMX instruction
10156 * (other than VMXON), we need to raise a #UD.
10157 */
10158 Log4Func(("Not in VMX root mode -> #UD\n"));
10159 vmxHCSetPendingXcptUD(pVCpu);
10160 return VINF_HM_PENDING_XCPT;
10161 }
10162
10163 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
10164 return VINF_SUCCESS;
10165}
10166
10167
10168/**
10169 * Decodes the memory operand of an instruction that caused a VM-exit.
10170 *
10171 * The Exit qualification field provides the displacement field for memory
10172 * operand instructions, if any.
10173 *
10174 * @returns Strict VBox status code (i.e. informational status codes too).
10175 * @retval VINF_SUCCESS if the operand was successfully decoded.
10176 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
10177 * operand.
10178 * @param pVCpu The cross context virtual CPU structure.
10179 * @param uExitInstrInfo The VM-exit instruction information field.
10180 * @param enmMemAccess The memory operand's access type (read or write).
10181 * @param GCPtrDisp The instruction displacement field, if any. For
10182 * RIP-relative addressing pass RIP + displacement here.
10183 * @param pGCPtrMem Where to store the effective destination memory address.
10184 *
10185 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
10186 * virtual-8086 mode hence skips those checks while verifying if the
10187 * segment is valid.
10188 */
10189static VBOXSTRICTRC vmxHCDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
10190 PRTGCPTR pGCPtrMem)
10191{
10192 Assert(pGCPtrMem);
10193 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
10194 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
10195 | CPUMCTX_EXTRN_CR0);
10196
10197 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
10198 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
10199 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
10200
10201 VMXEXITINSTRINFO ExitInstrInfo;
10202 ExitInstrInfo.u = uExitInstrInfo;
10203 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
10204 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
10205 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
10206 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
10207 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
10208 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
10209 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
10210 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
10211 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
10212
10213 /*
10214 * Validate instruction information.
10215 * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
10216 */
10217 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
10218 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
10219 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
10220 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
10221 AssertLogRelMsgReturn(fIsMemOperand,
10222 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
10223
10224 /*
10225 * Compute the complete effective address.
10226 *
10227 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
10228 * See AMD spec. 4.5.2 "Segment Registers".
10229 */
10230 RTGCPTR GCPtrMem = GCPtrDisp;
10231 if (fBaseRegValid)
10232 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
10233 if (fIdxRegValid)
10234 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
10235
10236 RTGCPTR const GCPtrOff = GCPtrMem;
10237 if ( !fIsLongMode
10238 || iSegReg >= X86_SREG_FS)
10239 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
10240 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
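    /*
     * Worked example (illustrative values): with a 32-bit address size, base register
     * ESI=0x1000, index register EDI=0x20, scale 4 and displacement 0x8, the sum is
     * 0x1000 + (0x20 << 2) + 0x8 = 0x1088; adding a zero DS base and masking with
     * s_auAddrSizeMasks[1] (0xffffffff) gives GCPtrMem = 0x1088.  With a 16-bit address
     * size the same mask step would truncate the result to 16 bits.
     */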
10241
10242 /*
10243 * Validate effective address.
10244 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
10245 */
10246 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
10247 Assert(cbAccess > 0);
10248 if (fIsLongMode)
10249 {
10250 if (X86_IS_CANONICAL(GCPtrMem))
10251 {
10252 *pGCPtrMem = GCPtrMem;
10253 return VINF_SUCCESS;
10254 }
10255
10256 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
10257 * "Data Limit Checks in 64-bit Mode". */
10258 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
10259 vmxHCSetPendingXcptGP(pVCpu, 0);
10260 return VINF_HM_PENDING_XCPT;
10261 }
10262
10263 /*
10264 * This is a watered down version of iemMemApplySegment().
10265 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
10266 * and segment CPL/DPL checks are skipped.
10267 */
10268 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
10269 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
10270 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
10271
10272 /* Check if the segment is present and usable. */
10273 if ( pSel->Attr.n.u1Present
10274 && !pSel->Attr.n.u1Unusable)
10275 {
10276 Assert(pSel->Attr.n.u1DescType);
10277 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
10278 {
10279 /* Check permissions for the data segment. */
10280 if ( enmMemAccess == VMXMEMACCESS_WRITE
10281 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
10282 {
10283 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
10284 vmxHCSetPendingXcptGP(pVCpu, iSegReg);
10285 return VINF_HM_PENDING_XCPT;
10286 }
10287
10288 /* Check limits if it's a normal data segment. */
10289 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
10290 {
10291 if ( GCPtrFirst32 > pSel->u32Limit
10292 || GCPtrLast32 > pSel->u32Limit)
10293 {
10294 Log4Func(("Data segment limit exceeded. "
10295 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
10296 GCPtrLast32, pSel->u32Limit));
10297 if (iSegReg == X86_SREG_SS)
10298 vmxHCSetPendingXcptSS(pVCpu, 0);
10299 else
10300 vmxHCSetPendingXcptGP(pVCpu, 0);
10301 return VINF_HM_PENDING_XCPT;
10302 }
10303 }
10304 else
10305 {
10306 /* Check limits if it's an expand-down data segment.
10307 Note! The upper boundary is defined by the B bit, not the G bit! */
10308 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
10309 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
10310 {
10311 Log4Func(("Expand-down data segment limit exceeded. "
10312 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
10313 GCPtrLast32, pSel->u32Limit));
10314 if (iSegReg == X86_SREG_SS)
10315 vmxHCSetPendingXcptSS(pVCpu, 0);
10316 else
10317 vmxHCSetPendingXcptGP(pVCpu, 0);
10318 return VINF_HM_PENDING_XCPT;
10319 }
10320 }
10321 }
10322 else
10323 {
10324 /* Check permissions for the code segment. */
10325 if ( enmMemAccess == VMXMEMACCESS_WRITE
10326 || ( enmMemAccess == VMXMEMACCESS_READ
10327 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
10328 {
10329 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
10330 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
10331 vmxHCSetPendingXcptGP(pVCpu, 0);
10332 return VINF_HM_PENDING_XCPT;
10333 }
10334
10335 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
10336 if ( GCPtrFirst32 > pSel->u32Limit
10337 || GCPtrLast32 > pSel->u32Limit)
10338 {
10339 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
10340 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
10341 if (iSegReg == X86_SREG_SS)
10342 vmxHCSetPendingXcptSS(pVCpu, 0);
10343 else
10344 vmxHCSetPendingXcptGP(pVCpu, 0);
10345 return VINF_HM_PENDING_XCPT;
10346 }
10347 }
10348 }
10349 else
10350 {
10351 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
10352 vmxHCSetPendingXcptGP(pVCpu, 0);
10353 return VINF_HM_PENDING_XCPT;
10354 }
10355
10356 *pGCPtrMem = GCPtrMem;
10357 return VINF_SUCCESS;
10358}
10359#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10360
10361
10362/**
10363 * VM-exit helper for LMSW.
10364 */
10365static VBOXSTRICTRC vmxHCExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
10366{
10367 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10368 AssertRCReturn(rc, rc);
10369
10370 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
10371 AssertMsg( rcStrict == VINF_SUCCESS
10372 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10373
10374 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
10375 if (rcStrict == VINF_IEM_RAISED_XCPT)
10376 {
10377 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10378 rcStrict = VINF_SUCCESS;
10379 }
10380
10381 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitLmsw);
10382 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10383 return rcStrict;
10384}
10385
10386
10387/**
10388 * VM-exit helper for CLTS.
10389 */
10390static VBOXSTRICTRC vmxHCExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
10391{
10392 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10393 AssertRCReturn(rc, rc);
10394
10395 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
10396 AssertMsg( rcStrict == VINF_SUCCESS
10397 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10398
10399 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
10400 if (rcStrict == VINF_IEM_RAISED_XCPT)
10401 {
10402 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10403 rcStrict = VINF_SUCCESS;
10404 }
10405
10406 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitClts);
10407 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10408 return rcStrict;
10409}
10410
10411
10412/**
10413 * VM-exit helper for MOV from CRx (CRx read).
10414 */
10415static VBOXSTRICTRC vmxHCExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10416{
10417 Assert(iCrReg < 16);
10418 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
10419
10420 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10421 AssertRCReturn(rc, rc);
10422
10423 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
10424 AssertMsg( rcStrict == VINF_SUCCESS
10425 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10426
10427 if (iGReg == X86_GREG_xSP)
10428 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
10429 else
10430 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
10431#ifdef VBOX_WITH_STATISTICS
10432 switch (iCrReg)
10433 {
10434 case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Read); break;
10435 case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Read); break;
10436 case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Read); break;
10437 case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Read); break;
10438 case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Read); break;
10439 }
10440#endif
10441 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
10442 return rcStrict;
10443}
10444
10445
10446/**
10447 * VM-exit helper for MOV to CRx (CRx write).
10448 */
10449static VBOXSTRICTRC vmxHCExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10450{
10451 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
10452
10453 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
10454 AssertMsg( rcStrict == VINF_SUCCESS
10455 || rcStrict == VINF_IEM_RAISED_XCPT
10456 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10457
10458 switch (iCrReg)
10459 {
10460 case 0:
10461 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
10462 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
10463 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR0Write);
10464 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
10465 break;
10466
10467 case 2:
10468 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR2Write);
10469 /* Nothing to do here, CR2 is not part of the VMCS. */
10470 break;
10471
10472 case 3:
10473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
10474 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR3Write);
10475 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
10476 break;
10477
10478 case 4:
10479 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
10480 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
10481#ifdef IN_RING0
10482 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
10483 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
10484#else
10485 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
10486#endif
10487 break;
10488
10489 case 8:
10490 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
10491 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
10492 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR8Write);
10493 break;
10494
10495 default:
10496 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
10497 break;
10498 }
10499
10500 if (rcStrict == VINF_IEM_RAISED_XCPT)
10501 {
10502 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10503 rcStrict = VINF_SUCCESS;
10504 }
10505 return rcStrict;
10506}
10507
10508
10509/**
10510 * VM-exit exception handler for \#PF (Page-fault exception).
10511 *
10512 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
10513 */
10514static VBOXSTRICTRC vmxHCExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10515{
10516 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10517 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10518
10519#ifdef IN_RING0
10520 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10521 if (!VM_IS_VMX_NESTED_PAGING(pVM))
10522 { /* likely */ }
10523 else
10524#endif
10525 {
10526#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && defined(IN_RING0)
10527 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
10528#endif
10529 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
10530 if (!pVmxTransient->fVectoringDoublePF)
10531 {
10532 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
10533 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
10534 }
10535 else
10536 {
10537 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
10538 Assert(!pVmxTransient->fIsNestedGuest);
10539 vmxHCSetPendingXcptDF(pVCpu);
10540 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
10541 }
10542 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
10543 return VINF_SUCCESS;
10544 }
10545
10546 Assert(!pVmxTransient->fIsNestedGuest);
10547
10548 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
10549    of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
10550 if (pVmxTransient->fVectoringPF)
10551 {
10552 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
10553 return VINF_EM_RAW_INJECT_TRPM_EVENT;
10554 }
10555
10556 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10557 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10558 AssertRCReturn(rc, rc);
10559
10560 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
10561 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
10562
10563 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
10564 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
10565
10566 Log4Func(("#PF: rc=%Rrc\n", rc));
10567 if (rc == VINF_SUCCESS)
10568 {
10569 /*
10570 * This is typically a shadow page table sync or an MMIO instruction. But we may have
10571 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
10572 */
10573 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10574 TRPMResetTrap(pVCpu);
10575 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPF);
10576 return rc;
10577 }
10578
10579 if (rc == VINF_EM_RAW_GUEST_TRAP)
10580 {
10581 if (!pVmxTransient->fVectoringDoublePF)
10582 {
10583 /* It's a guest page fault and needs to be reflected to the guest. */
10584 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
10585 TRPMResetTrap(pVCpu);
10586 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* In case it's a contributory #PF. */
10587 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
10588 uGstErrorCode, pVmxTransient->uExitQual);
10589 }
10590 else
10591 {
10592 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
10593 TRPMResetTrap(pVCpu);
10594 VCPU_2_VMXSTATE(pVCpu).Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
10595 vmxHCSetPendingXcptDF(pVCpu);
10596 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
10597 }
10598
10599 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF);
10600 return VINF_SUCCESS;
10601 }
10602
10603 TRPMResetTrap(pVCpu);
10604 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitShadowPFEM);
10605 return rc;
10606}
10607
10608
10609/**
10610 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
10611 *
10612 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
10613 */
10614static VBOXSTRICTRC vmxHCExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10615{
10616 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10617 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF);
10618
10619 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
10620 AssertRCReturn(rc, rc);
10621
10622 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
10623 {
10624 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
10625 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
10626
10627 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
10628 * provides VM-exit instruction length. If this causes problems later,
10629 * disassemble the instruction like it's done on AMD-V. */
10630 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
10631 AssertRCReturn(rc2, rc2);
10632 return rc;
10633 }
10634
10635 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
10636 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
10637 return VINF_SUCCESS;
10638}
10639
10640
10641/**
10642 * VM-exit exception handler for \#BP (Breakpoint exception).
10643 *
10644 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
10645 */
10646static VBOXSTRICTRC vmxHCExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10647{
10648 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10649 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP);
10650
10651 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10652 AssertRCReturn(rc, rc);
10653
10654 if (!pVmxTransient->fIsNestedGuest)
10655 rc = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
10656 else
10657 rc = VINF_EM_RAW_GUEST_TRAP;
10658
10659 if (rc == VINF_EM_RAW_GUEST_TRAP)
10660 {
10661 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
10662 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
10663 rc = VINF_SUCCESS;
10664 }
10665
10666 Assert(rc == VINF_SUCCESS || rc == VINF_EM_DBG_BREAKPOINT);
10667 return rc;
10668}
10669
10670
10671/**
10672 * VM-exit exception handler for \#AC (Alignment-check exception).
10673 *
10674 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
10675 */
10676static VBOXSTRICTRC vmxHCExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10677{
10678 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10679
10680 /*
10681 * Detect #ACs caused by the host having enabled split-lock detection.
10682 * Emulate such instructions.
10683 */
10684 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
10685 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
10686 AssertRCReturn(rc, rc);
10687 /** @todo detect split lock in cpu feature? */
10688 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
10689 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
10690 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
10691 || CPUMGetGuestCPL(pVCpu) != 3
10692 /* 3. Legacy #AC requires EFLAGS.AC to be set; when it is clear this can only be a split-lock case. */
10693 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
10694 {
10695 /*
10696 * Check for debug/trace events and import state accordingly.
10697 */
10698 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
10699 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10700 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
10701#ifdef IN_RING0
10702 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
10703#endif
10704 )
10705 {
10706 if (pVM->cCpus == 1)
10707 {
10708#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
10709 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
10710#else
10711 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10712#endif
10713 AssertRCReturn(rc, rc);
10714 }
10715 }
10716 else
10717 {
10718 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10719 AssertRCReturn(rc, rc);
10720
10721 VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
10722
10723 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
10724 {
10725 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
10726 if (rcStrict != VINF_SUCCESS)
10727 return rcStrict;
10728 }
10729 }
10730
10731 /*
10732 * Emulate the instruction.
10733 *
10734 * We have to ignore the LOCK prefix here as we must not retrigger the
10735 * detection on the host. This isn't all that satisfactory, though...
10736 */
10737 if (pVM->cCpus == 1)
10738 {
10739 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
10740 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
10741
10742 /** @todo For SMP configs we should do a rendezvous here. */
10743 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
10744 if (rcStrict == VINF_SUCCESS)
10745#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
10746 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
10747 HM_CHANGED_GUEST_RIP
10748 | HM_CHANGED_GUEST_RFLAGS
10749 | HM_CHANGED_GUEST_GPRS_MASK
10750 | HM_CHANGED_GUEST_CS
10751 | HM_CHANGED_GUEST_SS);
10752#else
10753 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
10754#endif
10755 else if (rcStrict == VINF_IEM_RAISED_XCPT)
10756 {
10757 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
10758 rcStrict = VINF_SUCCESS;
10759 }
10760 return rcStrict;
10761 }
10762 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
10763 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
10764 return VINF_EM_EMULATE_SPLIT_LOCK;
10765 }
10766
10767 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
10768 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
10769 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
10770
10771 /* Re-inject it. We'll detect any nesting before getting here. */
10772 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
10773 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
10774 return VINF_SUCCESS;
10775}
10776
10777
10778/**
10779 * VM-exit exception handler for \#DB (Debug exception).
10780 *
10781 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
10782 */
10783static VBOXSTRICTRC vmxHCExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10784{
10785 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10786 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB);
10787
10788 /*
10789 * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
10790 */
10791 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
10792
10793 /* Refer to Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
10794 uint64_t const uDR6 = X86_DR6_INIT_VAL
10795 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
10796 | X86_DR6_BD | X86_DR6_BS));
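    /*
     * Illustrative example: per the Exit qualification format referred to above, B0..B3
     * occupy bits 3:0 and BD/BS bits 13/14, matching their DR6 positions.  So an Exit
     * qualification with only B0 and BS set yields uDR6 = X86_DR6_INIT_VAL | X86_DR6_B0
     * | X86_DR6_BS, i.e. breakpoint 0 hit while single-stepping.
     */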
10797
10798 int rc;
10799 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10800 if (!pVmxTransient->fIsNestedGuest)
10801 {
10802 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
10803
10804 /*
10805 * Prevents stepping twice over the same instruction when the guest is stepping using
10806 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
10807 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
10808 */
10809 if ( rc == VINF_EM_DBG_STEPPED
10810 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
10811 {
10812 Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
10813 rc = VINF_EM_RAW_GUEST_TRAP;
10814 }
10815 }
10816 else
10817 rc = VINF_EM_RAW_GUEST_TRAP;
10818 Log6Func(("rc=%Rrc\n", rc));
10819 if (rc == VINF_EM_RAW_GUEST_TRAP)
10820 {
10821 /*
10822 * The exception was for the guest. Update DR6, DR7.GD and
10823 * IA32_DEBUGCTL.LBR before forwarding it.
10824 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
10825 */
10826#ifdef IN_RING0
10827 VMMRZCallRing3Disable(pVCpu);
10828 HM_DISABLE_PREEMPT(pVCpu);
10829
10830 pCtx->dr[6] &= ~X86_DR6_B_MASK;
10831 pCtx->dr[6] |= uDR6;
10832 if (CPUMIsGuestDebugStateActive(pVCpu))
10833 ASMSetDR6(pCtx->dr[6]);
10834
10835 HM_RESTORE_PREEMPT();
10836 VMMRZCallRing3Enable(pVCpu);
10837#else
10838 /** @todo */
10839#endif
10840
10841 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
10842 AssertRCReturn(rc, rc);
10843
10844 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
10845 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
10846
10847 /* Paranoia. */
10848 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
10849 pCtx->dr[7] |= X86_DR7_RA1_MASK;
10850
10851 rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
10852 AssertRC(rc);
10853
10854 /*
10855 * Raise #DB in the guest.
10856 *
10857 * It is important to reflect exactly what the VM-exit gave us (preserving the
10858 * interruption-type) rather than use vmxHCSetPendingXcptDB() as the #DB could've
10859 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
10860 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
10861 *
10862 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
10863 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
10864 */
10865 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
10866 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
10867 return VINF_SUCCESS;
10868 }
10869
10870 /*
10871 * Not a guest trap, must be a hypervisor related debug event then.
10872 * Update DR6 in case someone is interested in it.
10873 */
10874 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
10875 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
10876 CPUMSetHyperDR6(pVCpu, uDR6);
10877
10878 return rc;
10879}
10880
10881
10882/**
10883 * Hacks its way around the lovely mesa driver's backdoor accesses.
10884 *
10885 * @sa hmR0SvmHandleMesaDrvGp.
10886 */
10887static int vmxHCHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
10888{
10889 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
10890 RT_NOREF(pCtx);
10891
10892 /* For now we'll just skip the instruction. */
10893 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
10894}
10895
10896
10897/**
10898 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
10899 * backdoor logging w/o checking what it is running inside.
10900 *
10901 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
10902 * backdoor port and magic numbers loaded in registers.
10903 *
10904 * @returns true if it is, false if it isn't.
10905 * @sa hmR0SvmIsMesaDrvGp.
10906 */
10907DECLINLINE(bool) vmxHCIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
10908{
10909 /* 0xed: IN eAX,dx */
10910 uint8_t abInstr[1];
10911 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
10912 return false;
10913
10914 /* Check that it is #GP(0). */
10915 if (pVmxTransient->uExitIntErrorCode != 0)
10916 return false;
10917
10918 /* Check magic and port. */
10919 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
10920 /*Log(("vmxHCIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
10921 if (pCtx->rax != UINT32_C(0x564d5868))
10922 return false;
10923 if (pCtx->dx != UINT32_C(0x5658))
10924 return false;
10925
10926 /* Flat ring-3 CS. */
10927 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
10928 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
10929 /*Log(("vmxHCIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
10930 if (pCtx->cs.Attr.n.u2Dpl != 3)
10931 return false;
10932 if (pCtx->cs.u64Base != 0)
10933 return false;
10934
10935 /* Check opcode. */
10936 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
10937 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
10938 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
10939 /*Log(("vmxHCIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
10940 if (RT_FAILURE(rc))
10941 return false;
10942 if (abInstr[0] != 0xed)
10943 return false;
10944
10945 return true;
10946}
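/*
 * Illustrative sketch (not part of the build): the guest-side access matched above is the
 * VMware-style backdoor probe, roughly equivalent to the following ring-3 sequence (register
 * layout inferred from the checks in vmxHCIsMesaDrvGp; the actual driver code may differ):
 *
 *   mov eax, 0x564d5868      ; backdoor magic ('VMXh')
 *   mov edx, 0x5658          ; backdoor I/O port
 *   in  eax, dx              ; opcode 0xed - faults with #GP(0) when port access is denied
 */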
10947
10948
10949/**
10950 * VM-exit exception handler for \#GP (General-protection exception).
10951 *
10952 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
10953 */
10954static VBOXSTRICTRC vmxHCExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10955{
10956 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
10957 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP);
10958
10959 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10960 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10961#ifdef IN_RING0
10962 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
10963 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
10964 { /* likely */ }
10965 else
10966#endif
10967 {
10968#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
10969# ifdef IN_RING0
10970 Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
10971# else
10972 Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
10973# endif
10974#endif
10975 /*
10976 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
10977 * executing a nested-guest, reflect #GP to the guest or nested-guest.
10978 */
10979 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10980 AssertRCReturn(rc, rc);
10981 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
10982 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
10983
10984 if ( pVmxTransient->fIsNestedGuest
10985 || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
10986 || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
10987 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
10988 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
10989 else
10990 rc = vmxHCHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
10991 return rc;
10992 }
10993
10994#ifdef IN_RING0
10995 Assert(CPUMIsGuestInRealModeEx(pCtx));
10996 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
10997 Assert(!pVmxTransient->fIsNestedGuest);
10998
10999 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11000 AssertRCReturn(rc, rc);
11001
11002 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
11003 if (rcStrict == VINF_SUCCESS)
11004 {
11005 if (!CPUMIsGuestInRealModeEx(pCtx))
11006 {
11007 /*
11008 * The guest is no longer in real-mode, check if we can continue executing the
11009 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
11010 */
11011 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
11012 if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
11013 {
11014 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
11015 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
11016 }
11017 else
11018 {
11019 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
11020 rcStrict = VINF_EM_RESCHEDULE;
11021 }
11022 }
11023 else
11024 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
11025 }
11026 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11027 {
11028 rcStrict = VINF_SUCCESS;
11029 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11030 }
11031 return VBOXSTRICTRC_VAL(rcStrict);
11032#endif
11033}
11034
11035
11036/**
11037 * VM-exit exception handler wrapper for all other exceptions that are not handled
11038 * by a specific handler.
11039 *
11040 * This simply re-injects the exception back into the VM without any special
11041 * processing.
11042 *
11043 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
11044 */
11045static VBOXSTRICTRC vmxHCExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11046{
11047 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11048
11049#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11050# ifdef IN_RING0
11051 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11052 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
11053 ("uVector=%#x u32XcptBitmap=%#X32\n",
11054 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
11055 NOREF(pVmcsInfo);
11056# endif
11057#endif
11058
11059 /*
11060 * Re-inject the exception into the guest. This cannot be a double-fault condition, as that
11061 * would already have been handled while checking exits due to event delivery.
11062 */
11063 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11064
11065#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11066 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11067 AssertRCReturn(rc, rc);
11068 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
11069#endif
11070
11071#ifdef VBOX_WITH_STATISTICS
11072 switch (uVector)
11073 {
11074 case X86_XCPT_DE: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDE); break;
11075 case X86_XCPT_DB: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDB); break;
11076 case X86_XCPT_BP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBP); break;
11077 case X86_XCPT_OF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
11078 case X86_XCPT_BR: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestBR); break;
11079 case X86_XCPT_UD: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestUD); break;
11080 case X86_XCPT_NM: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestOF); break;
11081 case X86_XCPT_DF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestDF); break;
11082 case X86_XCPT_TS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestTS); break;
11083 case X86_XCPT_NP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestNP); break;
11084 case X86_XCPT_SS: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestSS); break;
11085 case X86_XCPT_GP: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestGP); break;
11086 case X86_XCPT_PF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestPF); break;
11087 case X86_XCPT_MF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestMF); break;
11088 case X86_XCPT_AC: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC); break;
11089 case X86_XCPT_XF: STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXF); break;
11090 default:
11091 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestXcpUnk);
11092 break;
11093 }
11094#endif
11095
11096 /* We should never call this function for a page-fault as we would otherwise need to pass on the fault address below. */
11097 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
11098 NOREF(uVector);
11099
11100 /* Re-inject the original exception into the guest. */
11101 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11102 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11103 return VINF_SUCCESS;
11104}
11105
11106
11107/**
11108 * VM-exit exception handler for all exceptions (except NMIs!).
11109 *
11110 * @remarks This may be called for both guests and nested-guests. Take care to not
11111 * make assumptions and avoid doing anything that is not relevant when
11112 * executing a nested-guest (e.g., Mesa driver hacks).
11113 */
11114static VBOXSTRICTRC vmxHCExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11115{
11116 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
11117
11118 /*
11119 * If this VM-exit occurred while delivering an event through the guest IDT, take
11120 * action based on the return code and additional hints (e.g. for page-faults)
11121 * that will be updated in the VMX transient structure.
11122 */
11123 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
11124 if (rcStrict == VINF_SUCCESS)
11125 {
11126 /*
11127 * If an exception caused a VM-exit due to delivery of an event, the original
11128 * event may have to be re-injected into the guest. We shall reinject it and
11129 * continue guest execution. However, page-fault is a complicated case and
11130 * needs additional processing done in vmxHCExitXcptPF().
11131 */
11132 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
11133 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11134 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending
11135 || uVector == X86_XCPT_PF)
11136 {
11137 switch (uVector)
11138 {
11139 case X86_XCPT_PF: return vmxHCExitXcptPF(pVCpu, pVmxTransient);
11140 case X86_XCPT_GP: return vmxHCExitXcptGP(pVCpu, pVmxTransient);
11141 case X86_XCPT_MF: return vmxHCExitXcptMF(pVCpu, pVmxTransient);
11142 case X86_XCPT_DB: return vmxHCExitXcptDB(pVCpu, pVmxTransient);
11143 case X86_XCPT_BP: return vmxHCExitXcptBP(pVCpu, pVmxTransient);
11144 case X86_XCPT_AC: return vmxHCExitXcptAC(pVCpu, pVmxTransient);
11145 default:
11146 return vmxHCExitXcptOthers(pVCpu, pVmxTransient);
11147 }
11148 }
11149 /* else: inject pending event before resuming guest execution. */
11150 }
11151 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
11152 {
11153 Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
11154 rcStrict = VINF_SUCCESS;
11155 }
11156
11157 return rcStrict;
11158}
11159/** @} */
11160
11161
11162/** @name VM-exit handlers.
11163 * @{
11164 */
11165/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11166/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
11167/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11168
11169/**
11170 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
11171 */
11172HMVMX_EXIT_DECL vmxHCExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11173{
11174 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11175 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
11176
11177#ifdef IN_RING0
11178 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
11179 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
11180 return VINF_SUCCESS;
11181 return VINF_EM_RAW_INTERRUPT;
11182#else
11183 return VINF_SUCCESS;
11184#endif
11185}
11186
11187
11188/**
11189 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
11190 * VM-exit.
11191 */
11192HMVMX_EXIT_DECL vmxHCExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11193{
11194 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11195 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
11196
11197 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
11198
11199 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11200 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11201 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
11202
11203 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11204 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
11205 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
11206 NOREF(pVmcsInfo);
11207
11208 VBOXSTRICTRC rcStrict;
11209 switch (uExitIntType)
11210 {
11211#ifdef IN_RING0 /* NMIs should never reach R3. */
11212 /*
11213 * Host physical NMIs:
11214 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
11215 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
11216 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
11217 *
11218 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
11219 * See Intel spec. 27.5.5 "Updating Non-Register State".
11220 */
11221 case VMX_EXIT_INT_INFO_TYPE_NMI:
11222 {
11223 rcStrict = vmxHCExitHostNmi(pVCpu, pVmcsInfo);
11224 break;
11225 }
11226#endif
11227
11228 /*
11229 * Privileged software exceptions (#DB from ICEBP),
11230 * Software exceptions (#BP and #OF),
11231 * Hardware exceptions:
11232 * Process the required exceptions and resume guest execution if possible.
11233 */
11234 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11235 Assert(uVector == X86_XCPT_DB);
11236 RT_FALL_THRU();
11237 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11238 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
11239 RT_FALL_THRU();
11240 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11241 {
11242 NOREF(uVector);
11243 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
11244 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11245 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
11246 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
11247
11248 rcStrict = vmxHCExitXcpt(pVCpu, pVmxTransient);
11249 break;
11250 }
11251
11252 default:
11253 {
11254 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
11255 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
11256 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
11257 break;
11258 }
11259 }
11260
11261 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi, y3);
11262 return rcStrict;
11263}
11264
11265
11266/**
11267 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
11268 */
11269HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11270{
11271 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11272
11273    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
11274 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11275 vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
11276
11277 /* Evaluate and deliver pending events and resume guest execution. */
11278 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIntWindow);
11279 return VINF_SUCCESS;
11280}
11281
11282
11283/**
11284 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
11285 */
11286HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11287{
11288 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11289
11290 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11291 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
11292 {
11293 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
11294 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
11295 }
11296
11297 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
11298
11299 /*
11300 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
11301 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
11302 */
11303 uint32_t fIntrState;
11304 int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
11305 AssertRC(rc);
11306 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
11307 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
11308 {
11309 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
11310 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
11311
11312 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
11313 rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
11314 AssertRC(rc);
11315 }
11316
11317    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
11318 vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
11319
11320 /* Evaluate and deliver pending events and resume guest execution. */
11321 return VINF_SUCCESS;
11322}
11323
11324
11325/**
11326 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
11327 */
11328HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11329{
11330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
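    /* Nothing to do for the VM here; simply skip the instruction in the guest. */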
11331 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
11332}
11333
11334
11335/**
11336 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
11337 */
11338HMVMX_EXIT_NSRC_DECL vmxHCExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11339{
11340 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11341 return vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
11342}
11343
11344
11345/**
11346 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
11347 */
11348HMVMX_EXIT_DECL vmxHCExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11349{
11350 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11351
11352 /*
11353 * Get the state we need and update the exit history entry.
11354 */
11355 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11356 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11357
11358 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
11359 AssertRCReturn(rc, rc);
11360
11361 VBOXSTRICTRC rcStrict;
11362 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
11363 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
11364 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
11365 if (!pExitRec)
11366 {
11367 /*
11368 * Regular CPUID instruction execution.
11369 */
11370 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
11371 if (rcStrict == VINF_SUCCESS)
11372 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11373 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11374 {
11375 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11376 rcStrict = VINF_SUCCESS;
11377 }
11378 }
11379 else
11380 {
11381 /*
11382 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
11383 */
11384 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11385 AssertRCReturn(rc2, rc2);
11386
11387 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
11388 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
11389
11390 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
11391 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
11392
11393 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
11394 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
11395 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
11396 }
11397 return rcStrict;
11398}
11399
11400
11401/**
11402 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
11403 */
11404HMVMX_EXIT_DECL vmxHCExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11405{
11406 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11407
11408 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11409 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
11410 AssertRCReturn(rc, rc);
11411
11412 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
11413 return VINF_EM_RAW_EMULATE_INSTR;
11414
11415 AssertMsgFailed(("vmxHCExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
11416 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
11417}
11418
11419
11420/**
11421 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
11422 */
11423HMVMX_EXIT_DECL vmxHCExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11424{
11425 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11426
11427 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11428 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11429 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
11430 AssertRCReturn(rc, rc);
11431
11432 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
11433 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11434 {
11435 /* If we get a spurious VM-exit when TSC offsetting is enabled,
11436 we must reset offsetting on VM-entry. See @bugref{6634}. */
11437 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
11438 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11439 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11440 }
11441 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11442 {
11443 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11444 rcStrict = VINF_SUCCESS;
11445 }
11446 return rcStrict;
11447}
11448
11449
11450/**
11451 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
11452 */
11453HMVMX_EXIT_DECL vmxHCExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11454{
11455 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11456
11457 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11458 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11459 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
11460 AssertRCReturn(rc, rc);
11461
11462 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
11463 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11464 {
11465 /* If we get a spurious VM-exit when TSC offsetting is enabled,
11466 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11467 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
11468 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11469 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11470 }
11471 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11472 {
11473 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11474 rcStrict = VINF_SUCCESS;
11475 }
11476 return rcStrict;
11477}
11478
11479
11480/**
11481 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
11482 */
11483HMVMX_EXIT_DECL vmxHCExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11484{
11485 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11486
11487 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11488 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
11489 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11490 AssertRCReturn(rc, rc);
11491
11492 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11493 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
11494 if (RT_LIKELY(rc == VINF_SUCCESS))
11495 {
11496 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
11497 Assert(pVmxTransient->cbExitInstr == 2);
11498 }
11499 else
11500 {
11501 AssertMsgFailed(("vmxHCExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
11502 rc = VERR_EM_INTERPRETER;
11503 }
11504 return rc;
11505}
11506
11507
11508/**
11509 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
11510 */
11511HMVMX_EXIT_DECL vmxHCExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11512{
11513 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11514
11515 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
11516 if (EMAreHypercallInstructionsEnabled(pVCpu))
11517 {
11518 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11519 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
11520 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
11521 AssertRCReturn(rc, rc);
11522
11523 /* Perform the hypercall. */
11524 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
11525 if (rcStrict == VINF_SUCCESS)
11526 {
11527 rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
11528 AssertRCReturn(rc, rc);
11529 }
11530 else
11531 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
11532 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
11533 || RT_FAILURE(rcStrict));
11534
11535 /* If the hypercall changes anything other than guest's general-purpose registers,
11536 we would need to reload the guest changed bits here before VM-entry. */
11537 }
11538 else
11539 Log4Func(("Hypercalls not enabled\n"));
11540
11541 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
11542 if (RT_FAILURE(rcStrict))
11543 {
11544 vmxHCSetPendingXcptUD(pVCpu);
11545 rcStrict = VINF_SUCCESS;
11546 }
11547
11548 return rcStrict;
11549}
11550
11551
11552/**
11553 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
11554 */
11555HMVMX_EXIT_DECL vmxHCExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11556{
11557 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11558#ifdef IN_RING0
11559 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
11560#endif
11561
11562 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11563 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11564 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11565 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
11566 AssertRCReturn(rc, rc);
11567
11568 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
11569
11570 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
11571 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11572 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11573 {
11574 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11575 rcStrict = VINF_SUCCESS;
11576 }
11577 else
11578 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
11579 VBOXSTRICTRC_VAL(rcStrict)));
11580 return rcStrict;
11581}
11582
11583
11584/**
11585 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
11586 */
11587HMVMX_EXIT_DECL vmxHCExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11588{
11589 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11590
11591 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11592 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11593 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11594 AssertRCReturn(rc, rc);
11595
11596 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
11597 if (rcStrict == VINF_SUCCESS)
11598 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11599 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11600 {
11601 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11602 rcStrict = VINF_SUCCESS;
11603 }
11604
11605 return rcStrict;
11606}
11607
11608
11609/**
11610 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
11611 */
11612HMVMX_EXIT_DECL vmxHCExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11613{
11614 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11615
11616 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11617 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11618 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
11619 AssertRCReturn(rc, rc);
11620
11621 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
11622 if (RT_SUCCESS(rcStrict))
11623 {
11624 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
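        /* IEM may return VINF_EM_HALT for MWAIT; if EM determines the wait need not
           block the VCPU, override the status and resume guest execution. */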
11625 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
11626 rcStrict = VINF_SUCCESS;
11627 }
11628
11629 return rcStrict;
11630}
11631
11632
11633/**
11634 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
11635 * VM-exit.
11636 */
11637HMVMX_EXIT_DECL vmxHCExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11638{
11639 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
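    /* A guest triple fault can only be handled by resetting the VM; signal this to EM. */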
11640 return VINF_EM_RESET;
11641}
11642
11643
11644/**
11645 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
11646 */
11647HMVMX_EXIT_DECL vmxHCExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11648{
11649 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11650
11651 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
11652 AssertRCReturn(rc, rc);
11653
11654 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
11655 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
11656 rc = VINF_SUCCESS;
11657 else
11658 rc = VINF_EM_HALT;
11659
11660 if (rc != VINF_SUCCESS)
11661 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchHltToR3);
11662 return rc;
11663}
11664
11665
11666/**
11667 * VM-exit handler for instructions that result in a \#UD exception delivered to
11668 * the guest.
11669 */
11670HMVMX_EXIT_NSRC_DECL vmxHCExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11671{
11672 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11673 vmxHCSetPendingXcptUD(pVCpu);
11674 return VINF_SUCCESS;
11675}
11676
11677
11678/**
11679 * VM-exit handler for expiry of the VMX-preemption timer.
11680 */
11681HMVMX_EXIT_DECL vmxHCExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11682{
11683 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11684
11685 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
11686 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11687    Log12(("vmxHCExitPreemptTimer:\n"));
11688
11689 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
11690 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11691 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
11692 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitPreemptTimer);
11693 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
11694}
11695
11696
11697/**
11698 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
11699 */
11700HMVMX_EXIT_DECL vmxHCExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11701{
11702 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11703
11704 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11705 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11706 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
11707 AssertRCReturn(rc, rc);
11708
11709 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
11710 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11711 : HM_CHANGED_RAISED_XCPT_MASK);
11712
11713#ifdef IN_RING0
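    /* The guest may have changed XCR0; re-evaluate whether guest/host XCR0 must be
       swapped around VM-entry/exit and update the start-VM function if that changed. */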
11714 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11715 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
11716 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
11717 {
11718 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
11719 vmxHCUpdateStartVmFunction(pVCpu);
11720 }
11721#endif
11722
11723 return rcStrict;
11724}
11725
11726
11727/**
11728 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
11729 */
11730HMVMX_EXIT_DECL vmxHCExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11731{
11732 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11733
11734    /** @todo Enable the new code after finding a reliable guest test-case. */
11735#if 1
11736 return VERR_EM_INTERPRETER;
11737#else
11738 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11739 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
11740 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
11741 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
11742 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
11743 AssertRCReturn(rc, rc);
11744
11745 /* Paranoia. Ensure this has a memory operand. */
11746 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
11747
11748 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
11749 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
11750 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
11751 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
11752
11753 RTGCPTR GCPtrDesc;
11754 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
11755
11756 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
11757 GCPtrDesc, uType);
11758 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11759 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11760 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11761 {
11762 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11763 rcStrict = VINF_SUCCESS;
11764 }
11765 return rcStrict;
11766#endif
11767}
11768
11769
11770/**
11771 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
11772 * VM-exit.
11773 */
11774HMVMX_EXIT_NSRC_DECL vmxHCExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11775{
11776 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11777 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11778 AssertRCReturn(rc, rc);
11779
11780 rc = vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
11781 if (RT_FAILURE(rc))
11782 return rc;
11783
11784 uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
11785 NOREF(uInvalidReason);
11786
11787#ifdef VBOX_STRICT
11788 uint32_t fIntrState;
11789 uint64_t u64Val;
11790 vmxHCReadEntryIntInfoVmcs(pVCpu, pVmxTransient);
11791 vmxHCReadEntryXcptErrorCodeVmcs(pVCpu, pVmxTransient);
11792 vmxHCReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
11793
11794 Log4(("uInvalidReason %u\n", uInvalidReason));
11795 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
11796 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
11797 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
11798
11799 rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
11800 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
11801 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
11802 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
11803 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
11804 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
11805 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
11806    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
11807 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
11808 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
11809 rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
11810 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
11811# ifdef IN_RING0
11812 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
11813 {
11814 rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
11815 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
11816 }
11817
11818 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
11819# endif
11820#endif
11821
11822 return VERR_VMX_INVALID_GUEST_STATE;
11823}
11824
11825/**
11826 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
11827 */
11828HMVMX_EXIT_NSRC_DECL vmxHCExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11829{
11830 /*
11831 * Cumulative notes of all recognized but unexpected VM-exits.
11832 *
11833 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
11834 * nested-paging is used.
11835 *
11836 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
11837 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
11838 * this function (and thereby stop VM execution) for handling such instructions.
11839 *
11840 *
11841 * VMX_EXIT_INIT_SIGNAL:
11842 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
11843 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
11844     * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
11845     *
11846     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
11847 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
11848 * See Intel spec. "23.8 Restrictions on VMX operation".
11849 *
11850 * VMX_EXIT_SIPI:
11851 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
11852 * activity state is used. We don't make use of it as our guests don't have direct
11853 * access to the host local APIC.
11854 *
11855 * See Intel spec. 25.3 "Other Causes of VM-exits".
11856 *
11857 * VMX_EXIT_IO_SMI:
11858 * VMX_EXIT_SMI:
11859 * This can only happen if we support dual-monitor treatment of SMI, which can be
11860 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
11861 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
11862 * VMX root mode or receive an SMI. If we get here, something funny is going on.
11863 *
11864 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
11865 * See Intel spec. 25.3 "Other Causes of VM-Exits"
11866 *
11867 * VMX_EXIT_ERR_MSR_LOAD:
11868     * Failures while loading MSRs that are part of the VM-entry MSR-load area are
11869     * unexpected and typically indicate a bug in the hypervisor code. We thus cannot
11870     * resume guest execution.
11871 *
11872 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
11873 *
11874 * VMX_EXIT_ERR_MACHINE_CHECK:
11875     * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
11876     * including but not limited to system bus, ECC, parity, cache and TLB errors. An
11877     * abort-class #MC exception is raised. We thus cannot assume a
11878 * reasonable chance of continuing any sort of execution and we bail.
11879 *
11880 * See Intel spec. 15.1 "Machine-check Architecture".
11881 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
11882 *
11883 * VMX_EXIT_PML_FULL:
11884 * VMX_EXIT_VIRTUALIZED_EOI:
11885 * VMX_EXIT_APIC_WRITE:
11886 * We do not currently support any of these features and thus they are all unexpected
11887 * VM-exits.
11888 *
11889 * VMX_EXIT_GDTR_IDTR_ACCESS:
11890 * VMX_EXIT_LDTR_TR_ACCESS:
11891 * VMX_EXIT_RDRAND:
11892 * VMX_EXIT_RSM:
11893 * VMX_EXIT_VMFUNC:
11894 * VMX_EXIT_ENCLS:
11895 * VMX_EXIT_RDSEED:
11896 * VMX_EXIT_XSAVES:
11897 * VMX_EXIT_XRSTORS:
11898 * VMX_EXIT_UMWAIT:
11899 * VMX_EXIT_TPAUSE:
11900 * VMX_EXIT_LOADIWKEY:
11901 * These VM-exits are -not- caused unconditionally by execution of the corresponding
11902     * instruction. Any VM-exit for these instructions indicates a hardware problem,
11903 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
11904 *
11905 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
11906 */
11907 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11908 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
11909 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
11910}
11911
11912
11913/**
11914 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
11915 */
11916HMVMX_EXIT_DECL vmxHCExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11917{
11918 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11919
11920 /** @todo Optimize this: We currently drag in the whole MSR state
11921     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
11922     * MSRs required. That would require changes to IEM and possibly CPUM too.
11923     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
11924 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11925 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
11926 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
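    /* As noted in the WRMSR handler below, the FS and GS base MSRs are not part of the
       all-MSRs mask above, so import the full segment registers for those. */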
11927 switch (idMsr)
11928 {
11929 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
11930 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
11931 }
11932
11933 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
11934 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
11935 AssertRCReturn(rc, rc);
11936
11937 Log4Func(("ecx=%#RX32\n", idMsr));
11938
11939#if defined(VBOX_STRICT) && defined(IN_RING0)
11940 Assert(!pVmxTransient->fIsNestedGuest);
11941 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
11942 {
11943 if ( vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
11944 && idMsr != MSR_K6_EFER)
11945 {
11946 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
11947 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
11948 }
11949 if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
11950 {
11951 Assert(pVmcsInfo->pvMsrBitmap);
11952 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
11953 if (fMsrpm & VMXMSRPM_ALLOW_RD)
11954 {
11955 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
11956 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
11957 }
11958 }
11959 }
11960#endif
11961
11962 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
11963 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitRdmsr);
11964 if (rcStrict == VINF_SUCCESS)
11965 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11966 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11967 {
11968 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
11969 rcStrict = VINF_SUCCESS;
11970 }
11971 else
11972 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
11973 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
11974
11975 return rcStrict;
11976}
11977
11978
11979/**
11980 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
11981 */
11982HMVMX_EXIT_DECL vmxHCExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
11983{
11984 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
11985
11986 /** @todo Optimize this: We currently drag in the whole MSR state
11987     * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
11988     * MSRs required. That would require changes to IEM and possibly CPUM too.
11989     * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
11990 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
11991 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
11992
11993 /*
11994 * The FS and GS base MSRs are not part of the above all-MSRs mask.
11995     * Although we don't need to fetch the base as it will be overwritten shortly,
11996     * loading the guest state will load the entire segment register (including the
11997     * limit and attributes), so we need to import them here.
11998 */
11999 switch (idMsr)
12000 {
12001 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
12002 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
12003 }
12004
12005 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12006 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12007 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImport);
12008 AssertRCReturn(rc, rc);
12009
12010 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
12011
12012 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
12013 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitWrmsr);
12014
12015 if (rcStrict == VINF_SUCCESS)
12016 {
12017 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12018
12019 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
12020 if ( idMsr == MSR_IA32_APICBASE
12021 || ( idMsr >= MSR_IA32_X2APIC_START
12022 && idMsr <= MSR_IA32_X2APIC_END))
12023 {
12024 /*
12025 * We've already saved the APIC related guest-state (TPR) in post-run phase.
12026 * When full APIC register virtualization is implemented we'll have to make
12027 * sure APIC state is saved from the VMCS before IEM changes it.
12028 */
12029 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
12030 }
12031 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
12032 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
12033 else if (idMsr == MSR_K6_EFER)
12034 {
12035 /*
12036 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
12037 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
12038 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
12039 */
12040 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
12041 }
12042
12043 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
12044 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
12045 {
12046 switch (idMsr)
12047 {
12048 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
12049 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
12050 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
12051 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS); break;
12052 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS); break;
12053 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
12054 default:
12055 {
12056#ifdef IN_RING0
12057 if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
12058 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
12059 else if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
12060 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
12061#else
12062 AssertMsgFailed(("TODO\n"));
12063#endif
12064 break;
12065 }
12066 }
12067 }
12068#if defined(VBOX_STRICT) && defined(IN_RING0)
12069 else
12070 {
12071 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
12072 switch (idMsr)
12073 {
12074 case MSR_IA32_SYSENTER_CS:
12075 case MSR_IA32_SYSENTER_EIP:
12076 case MSR_IA32_SYSENTER_ESP:
12077 case MSR_K8_FS_BASE:
12078 case MSR_K8_GS_BASE:
12079 {
12080 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
12081 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
12082 }
12083
12084            /* Writes to MSRs in the auto-load/store area or swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
12085 default:
12086 {
12087 if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
12088 {
12089 /* EFER MSR writes are always intercepted. */
12090 if (idMsr != MSR_K6_EFER)
12091 {
12092 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
12093 idMsr));
12094 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
12095 }
12096 }
12097
12098 if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
12099 {
12100 Assert(pVmcsInfo->pvMsrBitmap);
12101 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
12102 if (fMsrpm & VMXMSRPM_ALLOW_WR)
12103 {
12104 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
12105 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
12106 }
12107 }
12108 break;
12109 }
12110 }
12111 }
12112#endif /* VBOX_STRICT */
12113 }
12114 else if (rcStrict == VINF_IEM_RAISED_XCPT)
12115 {
12116 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
12117 rcStrict = VINF_SUCCESS;
12118 }
12119 else
12120 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
12121 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
12122
12123 return rcStrict;
12124}
12125
12126
12127/**
12128 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
12129 */
12130HMVMX_EXIT_DECL vmxHCExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12131{
12132 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12133
12134 /** @todo The guest has likely hit a contended spinlock. We might want to
12135     * poke or schedule a different guest VCPU. */
12136 int rc = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
12137 if (RT_SUCCESS(rc))
12138 return VINF_EM_RAW_INTERRUPT;
12139
12140 AssertMsgFailed(("vmxHCExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
12141 return rc;
12142}
12143
12144
12145/**
12146 * VM-exit handler for when the TPR value is lowered below the specified
12147 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
12148 */
12149HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12150{
12151 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12152 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
12153
12154 /*
12155 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
12156 * We'll re-evaluate pending interrupts and inject them before the next VM
12157 * entry so we can just continue execution here.
12158 */
12159 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTprBelowThreshold);
12160 return VINF_SUCCESS;
12161}
12162
12163
12164/**
12165 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
12166 * VM-exit.
12167 *
12168 * @retval VINF_SUCCESS when guest execution can continue.
12169 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
12170 * @retval  VINF_EM_RESCHEDULE_REM when we need to return to ring-3 due to
12171 * incompatible guest state for VMX execution (real-on-v86 case).
12172 */
12173HMVMX_EXIT_DECL vmxHCExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12174{
12175 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12176 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
12177
12178 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12179 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12180 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12181
12182 VBOXSTRICTRC rcStrict;
12183 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
12184 uint64_t const uExitQual = pVmxTransient->uExitQual;
12185 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
12186 switch (uAccessType)
12187 {
12188 /*
12189 * MOV to CRx.
12190 */
12191 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
12192 {
12193 /*
12194 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
12195 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
12196 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
12197 * PAE PDPTEs as well.
12198 */
12199 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
12200 AssertRCReturn(rc, rc);
12201
12202 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
12203#ifdef IN_RING0
12204 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
12205#endif
12206 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
12207 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
12208
12209 /*
12210             * MOV to CR3 only causes a VM-exit when one or more of the following are true:
12211 * - When nested paging isn't used.
12212 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
12213 * - We are executing in the VM debug loop.
12214 */
12215#ifdef IN_RING0
12216 Assert( iCrReg != 3
12217 || !VM_IS_VMX_NESTED_PAGING(pVM)
12218 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
12219 || pVCpu->hmr0.s.fUsingDebugLoop);
12220#else
12221 Assert( iCrReg != 3
12222 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
12223#endif
12224
12225 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
12226 Assert( iCrReg != 8
12227 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
12228
12229 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
12230 AssertMsg( rcStrict == VINF_SUCCESS
12231 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12232
12233#ifdef IN_RING0
12234 /*
12235 * This is a kludge for handling switches back to real mode when we try to use
12236 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
12237 * deal with special selector values, so we have to return to ring-3 and run
12238 * there till the selector values are V86 mode compatible.
12239 *
12240 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
12241 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
12242 * this function.
12243 */
12244 if ( iCrReg == 0
12245 && rcStrict == VINF_SUCCESS
12246 && !VM_IS_VMX_UNRESTRICTED_GUEST(pVM)
12247 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
12248 && (uOldCr0 & X86_CR0_PE)
12249 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
12250 {
12251 /** @todo Check selectors rather than returning all the time. */
12252 Assert(!pVmxTransient->fIsNestedGuest);
12253 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
12254 rcStrict = VINF_EM_RESCHEDULE_REM;
12255 }
12256#endif
12257
12258 break;
12259 }
12260
12261 /*
12262 * MOV from CRx.
12263 */
12264 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
12265 {
12266 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
12267 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
12268
12269 /*
12270             * MOV from CR3 only causes a VM-exit when one or more of the following are true:
12271 * - When nested paging isn't used.
12272 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
12273 * - We are executing in the VM debug loop.
12274 */
12275#ifdef IN_RING0
12276 Assert( iCrReg != 3
12277 || !VM_IS_VMX_NESTED_PAGING(pVM)
12278 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
12279 || pVCpu->hmr0.s.fLeaveDone);
12280#else
12281 Assert( iCrReg != 3
12282 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
12283#endif
12284
12285 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
12286 Assert( iCrReg != 8
12287 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
12288
12289 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
12290 break;
12291 }
12292
12293 /*
12294 * CLTS (Clear Task-Switch Flag in CR0).
12295 */
12296 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
12297 {
12298 rcStrict = vmxHCExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
12299 break;
12300 }
12301
12302 /*
12303 * LMSW (Load Machine-Status Word into CR0).
12304 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
12305 */
12306 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
12307 {
12308 RTGCPTR GCPtrEffDst;
12309 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
12310 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
12311 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
12312 if (fMemOperand)
12313 {
12314 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
12315 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
12316 }
12317 else
12318 GCPtrEffDst = NIL_RTGCPTR;
12319 rcStrict = vmxHCExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
12320 break;
12321 }
12322
12323 default:
12324 {
12325 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
12326 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
12327 }
12328 }
12329
12330 Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
12331 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
12332 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
12333
12334 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx, y2);
12335 NOREF(pVM);
12336 return rcStrict;
12337}
12338
12339
12340/**
12341 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
12342 * VM-exit.
12343 */
12344HMVMX_EXIT_DECL vmxHCExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12345{
12346 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12347 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
12348
12349 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12350 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12351 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12352 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12353 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
12354 | CPUMCTX_EXTRN_EFER);
12355 /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
12356 AssertRCReturn(rc, rc);
12357
12358 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
12359 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
12360 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
12361 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
12362 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
12363 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
12364 bool const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
12365 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
12366
12367 /*
12368 * Update exit history to see if this exit can be optimized.
12369 */
12370 VBOXSTRICTRC rcStrict;
12371 PCEMEXITREC pExitRec = NULL;
12372 if ( !fGstStepping
12373 && !fDbgStepping)
12374 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12375 !fIOString
12376 ? !fIOWrite
12377 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
12378 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
12379 : !fIOWrite
12380 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
12381 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
12382 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12383 if (!pExitRec)
12384 {
12385 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
12386 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
12387
12388 uint32_t const cbValue = s_aIOSizes[uIOSize];
12389 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
12390 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
12391 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
12392 if (fIOString)
12393 {
12394 /*
12395 * INS/OUTS - I/O String instruction.
12396 *
12397 * Use instruction-information if available, otherwise fall back on
12398 * interpreting the instruction.
12399 */
12400 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
12401 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
12402 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
12403 if (fInsOutsInfo)
12404 {
12405 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
12406 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
12407 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
12408 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
12409 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
12410 if (fIOWrite)
12411 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
12412 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
12413 else
12414 {
12415 /*
12416 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
12417 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
12418 * See Intel Instruction spec. for "INS".
12419 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
12420 */
12421 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
12422 }
12423 }
12424 else
12425 rcStrict = IEMExecOne(pVCpu);
12426
12427 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
12428 fUpdateRipAlready = true;
12429 }
12430 else
12431 {
12432 /*
12433 * IN/OUT - I/O instruction.
12434 */
12435 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
12436 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
12437 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
12438 if (fIOWrite)
12439 {
12440 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
12441 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
12442#ifdef IN_RING0
12443 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
12444 && !pCtx->eflags.Bits.u1TF)
12445 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
12446#endif
12447 }
12448 else
12449 {
12450 uint32_t u32Result = 0;
12451 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
12452 if (IOM_SUCCESS(rcStrict))
12453 {
12454 /* Save result of I/O IN instr. in AL/AX/EAX. */
12455 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
12456 }
12457#ifdef IN_RING0
12458 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
12459 && !pCtx->eflags.Bits.u1TF)
12460 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
12461#endif
12462 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIORead);
12463 }
12464 }
12465
12466 if (IOM_SUCCESS(rcStrict))
12467 {
12468 if (!fUpdateRipAlready)
12469 {
12470 vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
12471 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
12472 }
12473
12474 /*
12475             * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault
12476             * guru meditation while booting a Fedora 17 64-bit guest.
12477 *
12478 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
12479 */
12480 if (fIOString)
12481 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
12482
12483 /*
12484 * If any I/O breakpoints are armed, we need to check if one triggered
12485 * and take appropriate action.
12486 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
12487 */
12488 rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
12489 AssertRCReturn(rc, rc);
12490
12491 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
12492 * execution engines about whether hyper BPs and such are pending. */
12493 uint32_t const uDr7 = pCtx->dr[7];
12494 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
12495 && X86_DR7_ANY_RW_IO(uDr7)
12496 && (pCtx->cr4 & X86_CR4_DE))
12497 || DBGFBpIsHwIoArmed(pVM)))
12498 {
12499 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
12500
12501#ifdef IN_RING0
12502 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
12503 VMMRZCallRing3Disable(pVCpu);
12504 HM_DISABLE_PREEMPT(pVCpu);
12505
12506 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
12507
12508 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
12509 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
12510 {
12511 /* Raise #DB. */
12512 if (fIsGuestDbgActive)
12513 ASMSetDR6(pCtx->dr[6]);
12514 if (pCtx->dr[7] != uDr7)
12515 VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
12516
12517 vmxHCSetPendingXcptDB(pVCpu);
12518 }
12519 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
12520 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
12521 else if ( rcStrict2 != VINF_SUCCESS
12522 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
12523 rcStrict = rcStrict2;
12524 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
12525
12526 HM_RESTORE_PREEMPT();
12527 VMMRZCallRing3Enable(pVCpu);
12528#else
12529 /** @todo */
12530#endif
12531 }
12532 }
12533
12534#ifdef VBOX_STRICT
12535 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
12536 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
12537 Assert(!fIOWrite);
12538 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
12539 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
12540 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
12541 Assert(fIOWrite);
12542 else
12543 {
12544# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
12545 * statuses, that the VMM device and some others may return. See
12546 * IOM_SUCCESS() for guidance. */
12547 AssertMsg( RT_FAILURE(rcStrict)
12548 || rcStrict == VINF_SUCCESS
12549 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
12550 || rcStrict == VINF_EM_DBG_BREAKPOINT
12551 || rcStrict == VINF_EM_RAW_GUEST_TRAP
12552 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12553# endif
12554 }
12555#endif
12556 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitIO, y1);
12557 }
12558 else
12559 {
12560 /*
12561 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12562 */
12563 int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
12564 AssertRCReturn(rc2, rc2);
12565 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIORead
12566 : fIOWrite ? &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATS(pVCpu).StatExitIOStringRead);
12567 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
12568 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12569 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
12570 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
12571
12572 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12573 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
12574
12575 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12576 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12577 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12578 }
12579 return rcStrict;
12580}
12581
12582
12583/**
12584 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
12585 * VM-exit.
12586 */
12587HMVMX_EXIT_DECL vmxHCExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12588{
12589 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12590
12591    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
12592 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12593 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
12594 {
12595 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
12596 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
12597 {
12598 uint32_t uErrCode;
12599 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
12600 {
12601 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
12602 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
12603 }
12604 else
12605 uErrCode = 0;
12606
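            /* If the event being delivered was a #PF, CR2 still holds the faulting address
               which must accompany the re-injected event. */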
12607 RTGCUINTPTR GCPtrFaultAddress;
12608 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
12609 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
12610 else
12611 GCPtrFaultAddress = 0;
12612
12613 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12614
12615 vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
12616 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
12617
12618 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
12619 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
12620 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
12621 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12622 }
12623 }
12624
12625 /* Fall back to the interpreter to emulate the task-switch. */
12626 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitTaskSwitch);
12627 return VERR_EM_INTERPRETER;
12628}
12629
12630
12631/**
12632 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
12633 */
12634HMVMX_EXIT_DECL vmxHCExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12635{
12636 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12637
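    /* Single-stepping via the monitor trap flag is done; clear the control and hand
       control back to the debugger. */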
12638 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12639 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
12640 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
12641 AssertRC(rc);
12642 return VINF_EM_DBG_STEPPED;
12643}
12644
12645
12646/**
12647 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
12648 */
12649HMVMX_EXIT_DECL vmxHCExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12650{
12651 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12652 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitApicAccess);
12653
12654 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
12655 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
12656 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12657 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
12658 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
12659
12660 /*
12661 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
12662 */
12663 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
12664 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
12665 {
12666 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
12667 if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
12668 {
12669 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
12670 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12671 }
12672 }
12673 else
12674 {
12675 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
12676 return rcStrict;
12677 }
12678
12679 /* IOMR0MmioPhysHandler() below may call into IEM; save the necessary state. */
12680 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12681 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12682 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
12683 AssertRCReturn(rc, rc);
12684
12685 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
12686 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
12687 switch (uAccessType)
12688 {
12689#ifdef IN_RING0
12690 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
12691 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
12692 {
12693 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
12694 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
12695 ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
12696
12697 RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
12698 GCPhys &= PAGE_BASE_GC_MASK;
12699 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
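/* Example: with the default xAPIC base of 0xfee00000, a linear write hitting the ICR (offset
   0x300 into the APIC page) yields GCPhys = 0xfee00300 for the IOMR0MmioPhysHandler() call below. */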
12700 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
12701 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
12702
12703 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
12704 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
12705 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12706 if ( rcStrict == VINF_SUCCESS
12707 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
12708 || rcStrict == VERR_PAGE_NOT_PRESENT)
12709 {
12710 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
12711 | HM_CHANGED_GUEST_APIC_TPR);
12712 rcStrict = VINF_SUCCESS;
12713 }
12714 break;
12715 }
12716#else
12717 /** @todo */
12718#endif
12719
12720 default:
12721 {
12722 Log4Func(("uAccessType=%#x\n", uAccessType));
12723 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
12724 break;
12725 }
12726 }
12727
12728 if (rcStrict != VINF_SUCCESS)
12729 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchApicAccessToR3);
12730 return rcStrict;
12731}
12732
12733
12734/**
12735 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
12736 * VM-exit.
12737 */
12738HMVMX_EXIT_DECL vmxHCExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12739{
12740 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12741 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12742
12743 /*
12744 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
12745 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
12746 * must emulate the MOV DRx access.
12747 */
12748 if (!pVmxTransient->fIsNestedGuest)
12749 {
12750 /* We should -not- get this VM-exit if the guest's debug registers were active. */
12751 if (pVmxTransient->fWasGuestDebugStateActive)
12752 {
12753 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
12754 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
12755 }
12756
12757 if ( !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
12758 && !pVmxTransient->fWasHyperDebugStateActive)
12759 {
12760 Assert(!DBGFIsStepping(pVCpu));
12761 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
12762
12763 /* Don't intercept MOV DRx any more. */
12764 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
12765 int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
12766 AssertRC(rc);
12767
12768#ifdef IN_RING0
12769 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
12770 VMMRZCallRing3Disable(pVCpu);
12771 HM_DISABLE_PREEMPT(pVCpu);
12772
12773 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
12774 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
12775 Assert(CPUMIsGuestDebugStateActive(pVCpu));
12776
12777 HM_RESTORE_PREEMPT();
12778 VMMRZCallRing3Enable(pVCpu);
12779#else
12780 /** @todo */
12781#endif
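/* With the intercept cleared and the guest debug state loaded, returning VINF_SUCCESS simply
   restarts the MOV DRx instruction, which now operates on the hardware debug registers without
   causing another VM-exit. */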
12782
12783#ifdef VBOX_WITH_STATISTICS
12784 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12785 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12786 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
12787 else
12788 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
12789#endif
12790 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxContextSwitch);
12791 return VINF_SUCCESS;
12792 }
12793 }
12794
12795 /*
12796 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
12797 * The EFER MSR is always up-to-date.
12798 * Update the segment registers and DR7 from the CPU.
12799 */
12800 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12801 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12802 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
12803 AssertRCReturn(rc, rc);
12804 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
12805
12806 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
12807 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12808 {
12809 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
12810 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
12811 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
12812 if (RT_SUCCESS(rc))
12813 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
12814 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxWrite);
12815 }
12816 else
12817 {
12818 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
12819 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
12820 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
12821 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitDRxRead);
12822 }
12823
12824 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
12825 if (RT_SUCCESS(rc))
12826 {
12827 int rc2 = vmxHCAdvanceGuestRip(pVCpu, pVmxTransient);
12828 AssertRCReturn(rc2, rc2);
12829 return VINF_SUCCESS;
12830 }
12831 return rc;
12832}
12833
12834
12835/**
12836 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
12837 * Conditional VM-exit.
12838 */
12839HMVMX_EXIT_DECL vmxHCExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12840{
12841 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12842
12843#ifdef IN_RING0
12844 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
12845
12846 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
12847 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
12848 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12849 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
12850 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
12851
12852 /*
12853 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
12854 */
12855 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
12856 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
12857 {
12858 /*
12859 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
12860 * instruction emulation to inject the original event. Otherwise, injecting the original event
12861 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
12862 */
12863 if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
12864 { /* likely */ }
12865 else
12866 {
12867 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectInterpret);
12868#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12869 /** @todo NSTVMX: Think about how this should be handled. */
12870 if (pVmxTransient->fIsNestedGuest)
12871 return VERR_VMX_IPE_3;
12872#endif
12873 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12874 }
12875 }
12876 else
12877 {
12878 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
12879 return rcStrict;
12880 }
12881
12882 /*
12883 * Get sufficient state and update the exit history entry.
12884 */
12885 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12886 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
12887 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
12888 AssertRCReturn(rc, rc);
12889
12890 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
12891 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12892 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
12893 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
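/* A non-NULL exit record means this RIP exits frequently (or needs probing); in that case the
   work is handed to EMHistoryExec below instead of the one-off PGM/IOM path. */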
12894 if (!pExitRec)
12895 {
12896 /*
12897 * If we succeed, resume guest execution.
12898 * If we fail in interpreting the instruction because we couldn't get the guest physical address
12899 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
12900 * in the host TLB), resume execution, which would cause a guest page fault and let the guest handle this
12901 * weird case. See @bugref{6043}.
12902 */
12903 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
12904 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12905/** @todo bird: We can probably just go straight to IOM here and assume that
12906 * it's MMIO, then fall back on PGM if that hunch didn't work out so
12907 * well. However, we need to address the aliasing workarounds that
12908 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
12909 *
12910 * Might also be interesting to see if we can get this done more or
12911 * less locklessly inside IOM. Need to consider the lookup table
12912 * updating and its use a bit more carefully first (or do all updates via
12913 * rendezvous) */
12914 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
12915 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
12916 if ( rcStrict == VINF_SUCCESS
12917 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
12918 || rcStrict == VERR_PAGE_NOT_PRESENT)
12919 {
12920 /* Successfully handled MMIO operation. */
12921 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
12922 | HM_CHANGED_GUEST_APIC_TPR);
12923 rcStrict = VINF_SUCCESS;
12924 }
12925 }
12926 else
12927 {
12928 /*
12929 * Frequent exit or something needing probing. Call EMHistoryExec.
12930 */
12931 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
12932 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
12933
12934 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12935 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
12936
12937 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12938 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12939 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12940 }
12941 return rcStrict;
12942#else
12943 AssertFailed();
12944 return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
12945#endif
12946}
12947
12948
12949/**
12950 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
12951 * VM-exit.
12952 */
12953HMVMX_EXIT_DECL vmxHCExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12954{
12955 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12956#ifdef IN_RING0
12957 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
12958
12959 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
12960 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
12961 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
12962 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
12963 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
12964 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
12965
12966 /*
12967 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
12968 */
12969 VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
12970 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
12971 {
12972 /*
12973 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
12974 * we shall resolve the nested #PF and re-inject the original event.
12975 */
12976 if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
12977 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatInjectReflectNPF);
12978 }
12979 else
12980 {
12981 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
12982 return rcStrict;
12983 }
12984
12985 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12986 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
12987 int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
12988 AssertRCReturn(rc, rc);
12989
12990 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
12991 uint64_t const uExitQual = pVmxTransient->uExitQual;
12992 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
12993
12994 RTGCUINT uErrorCode = 0;
12995 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
12996 uErrorCode |= X86_TRAP_PF_ID;
12997 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
12998 uErrorCode |= X86_TRAP_PF_RW;
12999 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
13000 uErrorCode |= X86_TRAP_PF_P;
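/* Example: a guest write to a guest-physical page with no EPT mapping sets only the write-access
   bit in the exit qualification, yielding uErrorCode = X86_TRAP_PF_RW (present bit clear) for the
   nested #PF handed to PGM below. */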
13001
13002 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13003 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
13004
13005 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
13006
13007 /*
13008 * Handle the pagefault trap for the nested shadow table.
13009 */
13010 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
13011 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
13012 TRPMResetTrap(pVCpu);
13013
13014 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
13015 if ( rcStrict == VINF_SUCCESS
13016 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
13017 || rcStrict == VERR_PAGE_NOT_PRESENT)
13018 {
13019 /* Successfully synced our nested page tables. */
13020 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitReasonNpf);
13021 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
13022 return VINF_SUCCESS;
13023 }
13024#else
13025 PVM pVM = pVCpu->CTX_SUFF(pVM);
13026 uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
13027 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13028 vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
13029 vmxHCImportGuestRip(pVCpu);
13030 vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
13031
13032 /*
13033 * Ask PGM for information about the given GCPhys. We need to check if we're
13034 * out of sync first.
13035 */
13036 NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
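/* The first field records whether this was a write access; the page-checker callback fills in
   the remaining fields (fDidSomething/fCanResume) evaluated below. */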
13037 PGMPHYSNEMPAGEINFO Info;
13038 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
13039 nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
13040 if (RT_SUCCESS(rc))
13041 {
13042 if (Info.fNemProt & ( RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
13043 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
13044 {
13045 if (State.fCanResume)
13046 {
13047 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
13048 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
13049 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
13050 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
13051 State.fDidSomething ? "" : " no-change"));
13052 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
13053 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
13054 return VINF_SUCCESS;
13055 }
13056 }
13057
13058 Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
13059 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
13060 pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
13061 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
13062 State.fDidSomething ? "" : " no-change"));
13063 }
13064 else
13065 Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
13066 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
13067 pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
13068
13069 /*
13070 * Emulate the memory access, either access handler or special memory.
13071 */
13072 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
13073 RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
13074 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
13075 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
13076 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
13077
13078 rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13079 AssertRCReturn(rc, rc);
13080
13081 VBOXSTRICTRC rcStrict;
13082 if (!pExitRec)
13083 rcStrict = IEMExecOne(pVCpu);
13084 else
13085 {
13086 /* Frequent access or probing. */
13087 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
13088 Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
13089 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
13090 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
13091 }
13092
13093 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
13094#endif
13095
13096 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13097 return rcStrict;
13098}
13099
13100
13101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13102/**
13103 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
13104 */
13105HMVMX_EXIT_DECL vmxHCExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13106{
13107 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13108
13109 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13110 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13111 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13112 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13113 | CPUMCTX_EXTRN_HWVIRT
13114 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13115 AssertRCReturn(rc, rc);
13116
13117 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13118
13119 VMXVEXITINFO ExitInfo;
13120 RT_ZERO(ExitInfo);
13121 ExitInfo.uReason = pVmxTransient->uExitReason;
13122 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13123 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13124 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13125 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13126
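/* All the VMX instruction exits below follow the same pattern: read the exit information, import
   just enough guest state, build a VMXVEXITINFO and hand the decoded instruction to IEM. */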
13127 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
13128 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13129 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13130 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13131 {
13132 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13133 rcStrict = VINF_SUCCESS;
13134 }
13135 return rcStrict;
13136}
13137
13138
13139/**
13140 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
13141 */
13142HMVMX_EXIT_DECL vmxHCExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13143{
13144 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13145
13146 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
13147 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
13148 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13149 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13150 AssertRCReturn(rc, rc);
13151
13152 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13153
13154 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
13155 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
13156 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
13157 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13158 {
13159 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
13160 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
13161 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
13162 }
13163 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
13164 return rcStrict;
13165}
13166
13167
13168/**
13169 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
13170 */
13171HMVMX_EXIT_DECL vmxHCExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13172{
13173 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13174
13175 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13176 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13177 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13178 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13179 | CPUMCTX_EXTRN_HWVIRT
13180 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13181 AssertRCReturn(rc, rc);
13182
13183 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13184
13185 VMXVEXITINFO ExitInfo;
13186 RT_ZERO(ExitInfo);
13187 ExitInfo.uReason = pVmxTransient->uExitReason;
13188 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13189 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13190 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13191 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13192
13193 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
13194 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13195 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13196 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13197 {
13198 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13199 rcStrict = VINF_SUCCESS;
13200 }
13201 return rcStrict;
13202}
13203
13204
13205/**
13206 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
13207 */
13208HMVMX_EXIT_DECL vmxHCExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13209{
13210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13211
13212 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13213 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13214 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13215 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13216 | CPUMCTX_EXTRN_HWVIRT
13217 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13218 AssertRCReturn(rc, rc);
13219
13220 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13221
13222 VMXVEXITINFO ExitInfo;
13223 RT_ZERO(ExitInfo);
13224 ExitInfo.uReason = pVmxTransient->uExitReason;
13225 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13226 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13227 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13228 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
13229
13230 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
13231 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13232 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13233 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13234 {
13235 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13236 rcStrict = VINF_SUCCESS;
13237 }
13238 return rcStrict;
13239}
13240
13241
13242/**
13243 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
13244 */
13245HMVMX_EXIT_DECL vmxHCExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13246{
13247 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13248
13249 /*
13250 * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
13251 * thus might not need to import the shadow VMCS state, but it's safer to do so just in
13252 * case code elsewhere dares look at unsynced VMCS fields.
13253 */
13254 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13255 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13256 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13257 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13258 | CPUMCTX_EXTRN_HWVIRT
13259 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13260 AssertRCReturn(rc, rc);
13261
13262 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13263
13264 VMXVEXITINFO ExitInfo;
13265 RT_ZERO(ExitInfo);
13266 ExitInfo.uReason = pVmxTransient->uExitReason;
13267 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13268 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13269 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13270 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
13271 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
13272
13273 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
13274 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13275 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13276 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13277 {
13278 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13279 rcStrict = VINF_SUCCESS;
13280 }
13281 return rcStrict;
13282}
13283
13284
13285/**
13286 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
13287 */
13288HMVMX_EXIT_DECL vmxHCExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13289{
13290 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13291
13292 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
13293 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
13294 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13295 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13296 AssertRCReturn(rc, rc);
13297
13298 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13299
13300 STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
13301 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
13302 STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry, z);
13303 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13304 {
13305 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
13306 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
13307 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
13308 }
13309 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
13310 return rcStrict;
13311}
13312
13313
13314/**
13315 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
13316 */
13317HMVMX_EXIT_DECL vmxHCExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13318{
13319 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13320
13321 /*
13322 * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
13323 * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags the
13324 * entire shadow VMCS for re-loading, so we save the entire shadow VMCS here.
13325 */
13326 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13327 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13328 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13329 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13330 | CPUMCTX_EXTRN_HWVIRT
13331 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13332 AssertRCReturn(rc, rc);
13333
13334 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13335
13336 VMXVEXITINFO ExitInfo;
13337 RT_ZERO(ExitInfo);
13338 ExitInfo.uReason = pVmxTransient->uExitReason;
13339 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13340 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13341 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13342 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
13343 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13344
13345 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
13346 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13347 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13348 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13349 {
13350 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13351 rcStrict = VINF_SUCCESS;
13352 }
13353 return rcStrict;
13354}
13355
13356
13357/**
13358 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
13359 */
13360HMVMX_EXIT_DECL vmxHCExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13361{
13362 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13363
13364 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13365 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
13366 | CPUMCTX_EXTRN_HWVIRT
13367 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
13368 AssertRCReturn(rc, rc);
13369
13370 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13371
13372 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
13373 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13374 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
13375 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13376 {
13377 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13378 rcStrict = VINF_SUCCESS;
13379 }
13380 return rcStrict;
13381}
13382
13383
13384/**
13385 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
13386 */
13387HMVMX_EXIT_DECL vmxHCExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13388{
13389 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13390
13391 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13392 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13393 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13394 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13395 | CPUMCTX_EXTRN_HWVIRT
13396 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13397 AssertRCReturn(rc, rc);
13398
13399 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13400
13401 VMXVEXITINFO ExitInfo;
13402 RT_ZERO(ExitInfo);
13403 ExitInfo.uReason = pVmxTransient->uExitReason;
13404 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13405 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13406 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13407 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13408
13409 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
13410 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13411 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
13412 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13413 {
13414 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13415 rcStrict = VINF_SUCCESS;
13416 }
13417 return rcStrict;
13418}
13419
13420
13421/**
13422 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
13423 */
13424HMVMX_EXIT_DECL vmxHCExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13425{
13426 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13427
13428 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13429 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13430 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13431 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
13432 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13433 AssertRCReturn(rc, rc);
13434
13435 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
13436
13437 VMXVEXITINFO ExitInfo;
13438 RT_ZERO(ExitInfo);
13439 ExitInfo.uReason = pVmxTransient->uExitReason;
13440 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13441 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
13442 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13443 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
13444
13445 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
13446 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13447 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13448 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13449 {
13450 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13451 rcStrict = VINF_SUCCESS;
13452 }
13453 return rcStrict;
13454}
13455#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13456/** @} */
13457
13458
13459#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13460/** @name Nested-guest VM-exit handlers.
13461 * @{
13462 */
13463/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13464/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13465/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13466
13467/**
13468 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
13469 * Conditional VM-exit.
13470 */
13471HMVMX_EXIT_DECL vmxHCExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13472{
13473 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13474
13475 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
13476
13477 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
13478 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
13479 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
13480
13481 switch (uExitIntType)
13482 {
13483 /*
13484 * Physical NMIs:
13485 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the host.
13486 */
13487 case VMX_EXIT_INT_INFO_TYPE_NMI:
13488 return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
13489
13490 /*
13491 * Hardware exceptions,
13492 * Software exceptions,
13493 * Privileged software exceptions:
13494 * Figure out if the exception must be delivered to the guest or the nested-guest.
13495 */
13496 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
13497 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
13498 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
13499 {
13500 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
13501 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13502 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
13503 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
13504
13505 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13506 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
13507 pVmxTransient->uExitIntErrorCode);
13508 if (fIntercept)
13509 {
13510 /* Exit qualification is required for debug and page-fault exceptions. */
13511 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13512
13513 /*
13514 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
13515 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
13516 * length. However, if delivery of a software interrupt, software exception or privileged
13517 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
13518 */
13519 VMXVEXITINFO ExitInfo;
13520 RT_ZERO(ExitInfo);
13521 ExitInfo.uReason = pVmxTransient->uExitReason;
13522 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13523 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13524
13525 VMXVEXITEVENTINFO ExitEventInfo;
13526 RT_ZERO(ExitEventInfo);
13527 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
13528 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
13529 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
13530 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
13531
13532#ifdef DEBUG_ramshankar
13533 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13534 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
13535 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
13536 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
13537 {
13538 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
13539 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
13540 }
13541#endif
13542 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
13543 }
13544
13545 /* Nested paging is currently a requirement; otherwise we would need to handle shadow #PFs in vmxHCExitXcptPF. */
13546 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
13547 return vmxHCExitXcpt(pVCpu, pVmxTransient);
13548 }
13549
13550 /*
13551 * Software interrupts:
13552 * VM-exits cannot be caused by software interrupts.
13553 *
13554 * External interrupts:
13555 * This should only happen when "acknowledge external interrupts on VM-exit"
13556 * control is set. However, we never set this when executing a guest or
13557 * nested-guest. For nested-guests it is emulated while injecting interrupts into
13558 * the guest.
13559 */
13560 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
13561 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
13562 default:
13563 {
13564 VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
13565 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
13566 }
13567 }
13568}
13569
13570
13571/**
13572 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
13573 * Unconditional VM-exit.
13574 */
13575HMVMX_EXIT_DECL vmxHCExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13576{
13577 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13578 return IEMExecVmxVmexitTripleFault(pVCpu);
13579}
13580
13581
13582/**
13583 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
13584 */
13585HMVMX_EXIT_NSRC_DECL vmxHCExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13586{
13587 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13588
13589 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
13590 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
13591 return vmxHCExitIntWindow(pVCpu, pVmxTransient);
13592}
13593
13594
13595/**
13596 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
13597 */
13598HMVMX_EXIT_NSRC_DECL vmxHCExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13599{
13600 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13601
13602 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
13603 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
13604 return vmxHCExitNmiWindow(pVCpu, pVmxTransient);
13605}
13606
13607
13608/**
13609 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
13610 * Unconditional VM-exit.
13611 */
13612HMVMX_EXIT_DECL vmxHCExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13613{
13614 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13615
13616 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13617 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13618 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
13619 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
13620
13621 VMXVEXITINFO ExitInfo;
13622 RT_ZERO(ExitInfo);
13623 ExitInfo.uReason = pVmxTransient->uExitReason;
13624 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13625 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13626
13627 VMXVEXITEVENTINFO ExitEventInfo;
13628 RT_ZERO(ExitEventInfo);
13629 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
13630 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
13631 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
13632}
13633
13634
13635/**
13636 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
13637 */
13638HMVMX_EXIT_DECL vmxHCExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13639{
13640 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13641
13642 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
13643 {
13644 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13645 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
13646 }
13647 return vmxHCExitHlt(pVCpu, pVmxTransient);
13648}
13649
13650
13651/**
13652 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
13653 */
13654HMVMX_EXIT_DECL vmxHCExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13655{
13656 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13657
13658 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
13659 {
13660 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13661 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13662
13663 VMXVEXITINFO ExitInfo;
13664 RT_ZERO(ExitInfo);
13665 ExitInfo.uReason = pVmxTransient->uExitReason;
13666 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13667 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13668 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13669 }
13670 return vmxHCExitInvlpg(pVCpu, pVmxTransient);
13671}
13672
13673
13674/**
13675 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
13676 */
13677HMVMX_EXIT_DECL vmxHCExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13678{
13679 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13680
13681 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
13682 {
13683 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13684 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
13685 }
13686 return vmxHCExitRdpmc(pVCpu, pVmxTransient);
13687}
13688
13689
13690/**
13691 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
13692 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
13693 */
13694HMVMX_EXIT_DECL vmxHCExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13695{
13696 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13697
13698 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
13699 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
13700
13701 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13702
13703 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
13704 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
13705 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
13706
13707 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
13708 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
13709 u64VmcsField &= UINT64_C(0xffffffff);
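/* Outside long mode only the low 32 bits of the register operand supply the VMCS field encoding
   (e.g. EAX rather than RAX), hence the masking above. */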
13710
13711 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
13712 {
13713 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13714 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13715
13716 VMXVEXITINFO ExitInfo;
13717 RT_ZERO(ExitInfo);
13718 ExitInfo.uReason = pVmxTransient->uExitReason;
13719 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13720 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13721 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
13722 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13723 }
13724
13725 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
13726 return vmxHCExitVmread(pVCpu, pVmxTransient);
13727 return vmxHCExitVmwrite(pVCpu, pVmxTransient);
13728}
13729
13730
13731/**
13732 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
13733 */
13734HMVMX_EXIT_DECL vmxHCExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13735{
13736 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13737
13738 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
13739 {
13740 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13741 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
13742 }
13743
13744 return vmxHCExitRdtsc(pVCpu, pVmxTransient);
13745}
13746
13747
13748/**
13749 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
13750 * Conditional VM-exit.
13751 */
13752HMVMX_EXIT_DECL vmxHCExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13753{
13754 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13755
13756 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13757 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13758
13759 VBOXSTRICTRC rcStrict;
13760 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
13761 switch (uAccessType)
13762 {
13763 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
13764 {
13765 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
13766 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
13767 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
13768 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
13769
13770 bool fIntercept;
13771 switch (iCrReg)
13772 {
13773 case 0:
13774 case 4:
13775 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
13776 break;
13777
13778 case 3:
13779 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
13780 break;
13781
13782 case 8:
13783 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
13784 break;
13785
13786 default:
13787 fIntercept = false;
13788 break;
13789 }
13790 if (fIntercept)
13791 {
13792 VMXVEXITINFO ExitInfo;
13793 RT_ZERO(ExitInfo);
13794 ExitInfo.uReason = pVmxTransient->uExitReason;
13795 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13796 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13797 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13798 }
13799 else
13800 {
13801 int const rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13802 AssertRCReturn(rc, rc);
13803 rcStrict = vmxHCExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
13804 }
13805 break;
13806 }
13807
13808 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
13809 {
13810 /*
13811 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
13812 * CR2 reads do not cause a VM-exit.
13813 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
13814 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
13815 */
13816 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
13817 if ( iCrReg == 3
13818 || iCrReg == 8)
13819 {
13820 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
13821 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
13822 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
13823 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
13824 {
13825 VMXVEXITINFO ExitInfo;
13826 RT_ZERO(ExitInfo);
13827 ExitInfo.uReason = pVmxTransient->uExitReason;
13828 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13829 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13830 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13831 }
13832 else
13833 {
13834 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
13835 rcStrict = vmxHCExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
13836 }
13837 }
13838 else
13839 {
13840 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
13841 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
13842 }
13843 break;
13844 }
13845
13846 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
13847 {
13848 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
13849 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
13850 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
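/* CLTS clears CR0.TS; it needs to be reflected to the nested hypervisor only when TS is
   host-owned (set in the CR0 guest/host mask) and the read shadow currently has TS set. */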
13851 if ( (uGstHostMask & X86_CR0_TS)
13852 && (uReadShadow & X86_CR0_TS))
13853 {
13854 VMXVEXITINFO ExitInfo;
13855 RT_ZERO(ExitInfo);
13856 ExitInfo.uReason = pVmxTransient->uExitReason;
13857 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13858 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13859 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13860 }
13861 else
13862 rcStrict = vmxHCExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
13863 break;
13864 }
13865
13866 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
13867 {
13868 RTGCPTR GCPtrEffDst;
13869 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
13870 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
13871 if (fMemOperand)
13872 {
13873 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
13874 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
13875 }
13876 else
13877 GCPtrEffDst = NIL_RTGCPTR;
13878
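/* LMSW can only modify CR0.PE/MP/EM/TS, so the intercept decision is made from the new MSW value
   against the nested-guest's CR0 guest/host mask and read shadow. */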
13879 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
13880 {
13881 VMXVEXITINFO ExitInfo;
13882 RT_ZERO(ExitInfo);
13883 ExitInfo.uReason = pVmxTransient->uExitReason;
13884 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13885 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
13886 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13887 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13888 }
13889 else
13890 rcStrict = vmxHCExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
13891 break;
13892 }
13893
13894 default:
13895 {
13896 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
13897 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
13898 }
13899 }
13900
13901 if (rcStrict == VINF_IEM_RAISED_XCPT)
13902 {
13903 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13904 rcStrict = VINF_SUCCESS;
13905 }
13906 return rcStrict;
13907}
13908
13909
13910/**
13911 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
13912 * Conditional VM-exit.
13913 */
13914HMVMX_EXIT_DECL vmxHCExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13915{
13916 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13917
13918 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
13919 {
13920 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13921 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13922
13923 VMXVEXITINFO ExitInfo;
13924 RT_ZERO(ExitInfo);
13925 ExitInfo.uReason = pVmxTransient->uExitReason;
13926 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13927 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13928 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13929 }
13930 return vmxHCExitMovDRx(pVCpu, pVmxTransient);
13931}
13932
13933
13934/**
13935 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
13936 * Conditional VM-exit.
13937 */
13938HMVMX_EXIT_DECL vmxHCExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13939{
13940 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13941
13942 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
13943
13944 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
13945 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
13946 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
13947
13948 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
13949 uint8_t const cbAccess = s_aIOSizes[uIOSize];
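/* The exit qualification size field encodes 0 -> 1 byte, 1 -> 2 bytes and 3 -> 4 bytes; the
   value 2 is invalid and already rejected by the assertion above. */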
13950 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
13951 {
13952 /*
13953 * IN/OUT instruction:
13954 * - Provides VM-exit instruction length.
13955 *
13956 * INS/OUTS instruction:
13957 * - Provides VM-exit instruction length.
13958 * - Provides Guest-linear address.
13959 * - Optionally provides VM-exit instruction info (depends on CPU feature).
13960 */
13961 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
13962 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
13963
13964 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
13965 pVmxTransient->ExitInstrInfo.u = 0;
13966 pVmxTransient->uGuestLinearAddr = 0;
13967
13968 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
13969 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
13970 if (fIOString)
13971 {
13972 vmxHCReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
13973 if (fVmxInsOutsInfo)
13974 {
13975 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
13976 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
13977 }
13978 }
13979
13980 VMXVEXITINFO ExitInfo;
13981 RT_ZERO(ExitInfo);
13982 ExitInfo.uReason = pVmxTransient->uExitReason;
13983 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
13984 ExitInfo.u64Qual = pVmxTransient->uExitQual;
13985 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
13986 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
13987 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
13988 }
13989 return vmxHCExitIoInstr(pVCpu, pVmxTransient);
13990}
13991
13992
13993/**
13994 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
13995 */
13996HMVMX_EXIT_DECL vmxHCExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13997{
13998 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13999
14000 uint32_t fMsrpm;
14001 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
14002 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
14003 else
14004 fMsrpm = VMXMSRPM_EXIT_RD;
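/* When the nested hypervisor does not use MSR bitmaps, every RDMSR causes a VM-exit, hence the
   unconditional exit-on-read above. */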
14005
14006 if (fMsrpm & VMXMSRPM_EXIT_RD)
14007 {
14008 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14009 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14010 }
14011 return vmxHCExitRdmsr(pVCpu, pVmxTransient);
14012}
14013
14014
14015/**
14016 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
14017 */
14018HMVMX_EXIT_DECL vmxHCExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14019{
14020 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14021
14022 uint32_t fMsrpm;
14023 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
14024 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
14025 else
14026 fMsrpm = VMXMSRPM_EXIT_WR;
14027
14028 if (fMsrpm & VMXMSRPM_EXIT_WR)
14029 {
14030 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14031 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14032 }
14033 return vmxHCExitWrmsr(pVCpu, pVmxTransient);
14034}
14035
14036
14037/**
14038 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
14039 */
14040HMVMX_EXIT_DECL vmxHCExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14041{
14042 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14043
14044 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
14045 {
14046 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14047 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14048 }
14049 return vmxHCExitMwait(pVCpu, pVmxTransient);
14050}
14051
14052
14053/**
14054 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
14055 * VM-exit.
14056 */
14057HMVMX_EXIT_DECL vmxHCExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14058{
14059 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14060
14061 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
14062 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
14063 VMXVEXITINFO ExitInfo;
14064 RT_ZERO(ExitInfo);
14065 ExitInfo.uReason = pVmxTransient->uExitReason;
14066 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
14067 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
14068}
14069
14070
14071/**
14072 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
14073 */
14074HMVMX_EXIT_DECL vmxHCExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14075{
14076 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14077
14078 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
14079 {
14080 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14081 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14082 }
14083 return vmxHCExitMonitor(pVCpu, pVmxTransient);
14084}
14085
14086
14087/**
14088 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
14089 */
14090HMVMX_EXIT_DECL vmxHCExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14091{
14092 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14093
14094 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
14095 * PAUSE when executing a nested-guest? If it does not, we would not need
14096      *        to check for the intercepts here and could simply cause the VM-exit. */
14097
14098 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
14099 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
14100 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
14101 {
14102 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14103 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14104 }
14105 return vmxHCExitPause(pVCpu, pVmxTransient);
14106}
14107
14108
14109/**
14110 * Nested-guest VM-exit handler for when the TPR value is lowered below the
14111 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
14112 */
14113HMVMX_EXIT_NSRC_DECL vmxHCExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14114{
14115 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14116
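    /* The nested-guest only gets this VM-exit when it uses the TPR shadow; like MTF it is
       handled as a trap-like VM-exit. */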
14117 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
14118 {
14119 vmxHCReadGuestPendingDbgXctps(pVCpu, pVmxTransient);
14120 VMXVEXITINFO ExitInfo;
14121 RT_ZERO(ExitInfo);
14122 ExitInfo.uReason = pVmxTransient->uExitReason;
14123 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
14124 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
14125 }
14126 return vmxHCExitTprBelowThreshold(pVCpu, pVmxTransient);
14127}
14128
14129
14130/**
14131 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
14132 * VM-exit.
14133 */
14134HMVMX_EXIT_DECL vmxHCExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14135{
14136 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14137
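    /* The APIC access may have occurred while delivering an event through the nested-guest IDT,
       so the IDT-vectoring information is read and forwarded along with the exit qualification. */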
14138 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14139 vmxHCReadIdtVectoringInfoVmcs(pVCpu, pVmxTransient);
14140 vmxHCReadIdtVectoringErrorCodeVmcs(pVCpu, pVmxTransient);
14141 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
14142
14143 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
14144
14145 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
14146 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
14147
14148 VMXVEXITINFO ExitInfo;
14149 RT_ZERO(ExitInfo);
14150 ExitInfo.uReason = pVmxTransient->uExitReason;
14151 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
14152 ExitInfo.u64Qual = pVmxTransient->uExitQual;
14153
14154 VMXVEXITEVENTINFO ExitEventInfo;
14155 RT_ZERO(ExitEventInfo);
14156 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
14157 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
14158 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
14159}
14160
14161
14162/**
14163 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
14164 * Conditional VM-exit.
14165 */
14166HMVMX_EXIT_DECL vmxHCExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14167{
14168 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14169
14170 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
14171 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
14172 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
14173}
14174
14175
14176/**
14177 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
14178 * Conditional VM-exit.
14179 */
14180HMVMX_EXIT_DECL vmxHCExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14181{
14182 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14183
14184 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
14185 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
14186 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
14187}
14188
14189
14190/**
14191 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
14192 */
14193HMVMX_EXIT_DECL vmxHCExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14194{
14195 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14196
14197 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
14198 {
14199 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
14200 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14201 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14202 }
14203 return vmxHCExitRdtscp(pVCpu, pVmxTransient);
14204}
14205
14206
14207/**
14208 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
14209 */
14210HMVMX_EXIT_NSRC_DECL vmxHCExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14211{
14212 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14213
14214 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
14215 {
14216 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14217 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14218 }
14219 return vmxHCExitWbinvd(pVCpu, pVmxTransient);
14220}
14221
14222
14223/**
14224 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
14225 */
14226HMVMX_EXIT_DECL vmxHCExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14227{
14228 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14229
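    /* INVPCID is intercepted via the INVLPG-exiting control; the 'enable INVPCID' secondary
       control must also be set, otherwise the instruction would have raised #UD instead. */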
14230 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
14231 {
14232 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
14233        vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14234 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
14235 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
14236
14237 VMXVEXITINFO ExitInfo;
14238 RT_ZERO(ExitInfo);
14239 ExitInfo.uReason = pVmxTransient->uExitReason;
14240 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
14241 ExitInfo.u64Qual = pVmxTransient->uExitQual;
14242 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
14243 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
14244 }
14245 return vmxHCExitInvpcid(pVCpu, pVmxTransient);
14246}
14247
14248
14249/**
14250 * Nested-guest VM-exit handler for invalid guest state
14251 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
14252 */
14253HMVMX_EXIT_DECL vmxHCExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14254{
14255 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14256
14257 /*
14258 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
14259 * If it does happen, it indicates a bug, possibly in the hardware-assisted VMX code.
14260 * Handle it as if the outer guest itself were in an invalid guest state.
14261 *
14262 * When the fast path is implemented, this should be changed to cause the corresponding
14263 * nested-guest VM-exit.
14264 */
14265 return vmxHCExitErrInvalidGuestState(pVCpu, pVmxTransient);
14266}
14267
14268
14269/**
14270 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
14271 * and only provide the instruction length.
14272 *
14273 * Unconditional VM-exit.
14274 */
14275HMVMX_EXIT_DECL vmxHCExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14276{
14277 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14278
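    /* In strict builds, double-check that the guest hypervisor enabled the control under which
       this instruction causes a VM-exit. */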
14279#ifdef VBOX_STRICT
14280 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14281 switch (pVmxTransient->uExitReason)
14282 {
14283 case VMX_EXIT_ENCLS:
14284 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
14285 break;
14286
14287 case VMX_EXIT_VMFUNC:
14288 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
14289 break;
14290 }
14291#endif
14292
14293 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14294 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
14295}
14296
14297
14298/**
14299 * Nested-guest VM-exit handler for instructions that provide the instruction length as
14300 * well as additional exit information.
14301 *
14302 * Unconditional VM-exit.
14303 */
14304HMVMX_EXIT_DECL vmxHCExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14305{
14306 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14307
14308#ifdef VBOX_STRICT
14309 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14310 switch (pVmxTransient->uExitReason)
14311 {
14312 case VMX_EXIT_GDTR_IDTR_ACCESS:
14313 case VMX_EXIT_LDTR_TR_ACCESS:
14314 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
14315 break;
14316
14317 case VMX_EXIT_RDRAND:
14318 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
14319 break;
14320
14321 case VMX_EXIT_RDSEED:
14322 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
14323 break;
14324
14325 case VMX_EXIT_XSAVES:
14326 case VMX_EXIT_XRSTORS:
14327 /** @todo NSTVMX: Verify XSS-bitmap. */
14328 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
14329 break;
14330
14331 case VMX_EXIT_UMWAIT:
14332 case VMX_EXIT_TPAUSE:
14333 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
14334 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
14335 break;
14336
14337 case VMX_EXIT_LOADIWKEY:
14338 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
14339 break;
14340 }
14341#endif
14342
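    /* These VM-exits additionally report the exit qualification and VM-exit instruction
       information, so read and forward those to the guest hypervisor as well. */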
14343 vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
14344 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
14345 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
14346
14347 VMXVEXITINFO ExitInfo;
14348 RT_ZERO(ExitInfo);
14349 ExitInfo.uReason = pVmxTransient->uExitReason;
14350 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
14351 ExitInfo.u64Qual = pVmxTransient->uExitQual;
14352 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
14353 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
14354}
14355
14356/** @} */
14357#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
14358