VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@ 97069

Last change on this file was r97069, checked in by vboxsync, 2 years ago

VMM/HMVMXR0: Working on streamlining CPU state importing from the VMCS. This does cause quite some code bloat (release linux from 93950 to 132120 text bytes), but it is hopefully worth it. This should also provide some basis for addressing the @todo in nemR3DarwinHandleExitCommon (NEM/darwin) where the code imports the entire state for every exit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 157.8 KB
1/* $Id: HM.cpp 97069 2022-10-10 15:03:10Z vboxsync $ */
2/** @file
3 * HM - Intel/AMD VM Hardware Support Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_hm HM - Hardware Assisted Virtualization Manager
29 *
30 * The HM manages guest execution using the VT-x and AMD-V CPU hardware
31 * extensions.
32 *
33 * {summary of what HM does}
34 *
35 * The hardware-assisted virtualization manager was originally abbreviated
36 * HWACCM, but that was cumbersome to write and parse for such a central
37 * component, so it was shortened to HM when the code was refactored in the
38 * 4.3 development cycle.
39 *
40 * {add sections with more details}
41 *
42 * @sa @ref grp_hm
43 */
44
45
46/*********************************************************************************************************************************
47* Header Files *
48*********************************************************************************************************************************/
49#define LOG_GROUP LOG_GROUP_HM
50#define VMCPU_INCL_CPUM_GST_CTX
51#include <VBox/vmm/cpum.h>
52#include <VBox/vmm/stam.h>
53#include <VBox/vmm/em.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pgm.h>
56#include <VBox/vmm/ssm.h>
57#include <VBox/vmm/gim.h>
58#include <VBox/vmm/gcm.h>
59#include <VBox/vmm/trpm.h>
60#include <VBox/vmm/dbgf.h>
61#include <VBox/vmm/iom.h>
62#include <VBox/vmm/iem.h>
63#include <VBox/vmm/selm.h>
64#include <VBox/vmm/nem.h>
65#include <VBox/vmm/hm_vmx.h>
66#include <VBox/vmm/hm_svm.h>
67#include "HMInternal.h"
68#include <VBox/vmm/vmcc.h>
69#include <VBox/err.h>
70#include <VBox/param.h>
71
72#include <iprt/assert.h>
73#include <VBox/log.h>
74#include <iprt/asm.h>
75#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
76# include <iprt/asm-amd64-x86.h>
77#endif
78#include <iprt/env.h>
79#include <iprt/thread.h>
80
81
82/*********************************************************************************************************************************
83* Defined Constants And Macros *
84*********************************************************************************************************************************/
85/** @def HMVMX_REPORT_FEAT
86 * Reports VT-x feature to the release log.
87 *
88 * @param a_uAllowed1 Mask of allowed-1 feature bits.
89 * @param a_uAllowed0 Mask of allowed-0 feature bits.
90 * @param a_StrDesc The description string to report.
91 * @param a_Featflag Mask of the feature to report.
92 */
93#define HMVMX_REPORT_FEAT(a_uAllowed1, a_uAllowed0, a_StrDesc, a_Featflag) \
94 do { \
95 if ((a_uAllowed1) & (a_Featflag)) \
96 { \
97 if ((a_uAllowed0) & (a_Featflag)) \
98 LogRel(("HM: " a_StrDesc " (must be set)\n")); \
99 else \
100 LogRel(("HM: " a_StrDesc "\n")); \
101 } \
102 else \
103 LogRel(("HM: " a_StrDesc " (must be cleared)\n")); \
104 } while (0)
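/* Usage sketch (editorial, not part of the original file): assuming hypothetical locals
 * fAllowed1/fAllowed0 holding the allowed-1 and allowed-0 halves of a VMX control MSR,
 * a single control would be reported like so:
 *
 *     HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
 *
 * "must be set" means the allowed-0 mask forces the control on; "must be cleared" means
 * the allowed-1 mask does not permit enabling it at all.
 */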
105
106/** @def HMVMX_REPORT_ALLOWED_FEAT
107 * Reports an allowed VT-x feature to the release log.
108 *
109 * @param a_uAllowed1 Mask of allowed-1 feature bits.
110 * @param a_StrDesc The description string to report.
111 * @param a_FeatFlag Mask of the feature to report.
112 */
113#define HMVMX_REPORT_ALLOWED_FEAT(a_uAllowed1, a_StrDesc, a_FeatFlag) \
114 do { \
115 if ((a_uAllowed1) & (a_FeatFlag)) \
116 LogRel(("HM: " a_StrDesc "\n")); \
117 else \
118 LogRel(("HM: " a_StrDesc " not supported\n")); \
119 } while (0)
120
121/** @def HMVMX_REPORT_MSR_CAP
122 * Reports MSR feature capability.
123 *
124 * @param a_MsrCaps Mask of MSR feature bits.
125 * @param a_StrDesc The description string to report.
126 * @param a_fCap Mask of the feature to report.
127 */
128#define HMVMX_REPORT_MSR_CAP(a_MsrCaps, a_StrDesc, a_fCap) \
129 do { \
130 if ((a_MsrCaps) & (a_fCap)) \
131 LogRel(("HM: " a_StrDesc "\n")); \
132 } while (0)
133
134/** @def HMVMX_LOGREL_FEAT
135 * Dumps a feature flag from a bitmap of features to the release log.
136 *
137 * @param a_fVal The value of all the features.
138 * @param a_fMask The specific bitmask of the feature.
139 */
140#define HMVMX_LOGREL_FEAT(a_fVal, a_fMask) \
141 do { \
142 if ((a_fVal) & (a_fMask)) \
143 LogRel(("HM: %s\n", #a_fMask)); \
144 } while (0)
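/* Usage sketch (editorial, not part of the original file): because the macro stringizes
 * its mask argument, HMVMX_LOGREL_FEAT(fFeatures, MY_FEATURE_MASK) with a hypothetical
 * MY_FEATURE_MASK bit set in fFeatures would log the literal line "HM: MY_FEATURE_MASK".
 */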
145
146
147/*********************************************************************************************************************************
148* Internal Functions *
149*********************************************************************************************************************************/
150static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
151static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
152static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
153static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
154static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
155static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
156static int hmR3InitFinalizeR3(PVM pVM);
157static int hmR3InitFinalizeR0(PVM pVM);
158static int hmR3InitFinalizeR0Intel(PVM pVM);
159static int hmR3InitFinalizeR0Amd(PVM pVM);
160static int hmR3TermCPU(PVM pVM);
161
162
163#ifdef VBOX_WITH_STATISTICS
164/**
165 * Returns the name of the hardware exception.
166 *
167 * @returns The name of the hardware exception.
168 * @param uVector The exception vector.
169 */
170static const char *hmR3GetXcptName(uint8_t uVector)
171{
172 switch (uVector)
173 {
174 case X86_XCPT_DE: return "#DE";
175 case X86_XCPT_DB: return "#DB";
176 case X86_XCPT_NMI: return "#NMI";
177 case X86_XCPT_BP: return "#BP";
178 case X86_XCPT_OF: return "#OF";
179 case X86_XCPT_BR: return "#BR";
180 case X86_XCPT_UD: return "#UD";
181 case X86_XCPT_NM: return "#NM";
182 case X86_XCPT_DF: return "#DF";
183 case X86_XCPT_CO_SEG_OVERRUN: return "#CO_SEG_OVERRUN";
184 case X86_XCPT_TS: return "#TS";
185 case X86_XCPT_NP: return "#NP";
186 case X86_XCPT_SS: return "#SS";
187 case X86_XCPT_GP: return "#GP";
188 case X86_XCPT_PF: return "#PF";
189 case X86_XCPT_MF: return "#MF";
190 case X86_XCPT_AC: return "#AC";
191 case X86_XCPT_MC: return "#MC";
192 case X86_XCPT_XF: return "#XF";
193 case X86_XCPT_VE: return "#VE";
194 case X86_XCPT_CP: return "#CP";
195 case X86_XCPT_VC: return "#VC";
196 case X86_XCPT_SX: return "#SX";
197 }
198 return "Reserved";
199}
200#endif /* VBOX_WITH_STATISTICS */
201
202
203/**
204 * Initializes the HM.
205 *
206 * This is the very first component to really do init after CFGM so that we can
207 * establish the predominant execution engine for the VM prior to initializing
208 * other modules. It takes care of NEM initialization if needed (HM disabled or
209 * not available in HW).
210 *
211 * If VT-x or AMD-V hardware isn't available, HM will try to fall back on a
212 * native hypervisor API via NEM, and then on raw-mode if that isn't available
213 * either. The fallback to raw-mode will not happen if /HM/HMForced is set (as
214 * it is for guests using SMP or 64-bit mode, as well as for complicated
215 * guests like OS X, OS/2 and others).
216 *
217 * Note that a lot of the setup work is done in ring-0 and thus postponed until
218 * the ring-3 and ring-0 callbacks to HMR3InitCompleted.
219 *
220 * @returns VBox status code.
221 * @param pVM The cross context VM structure.
222 *
223 * @remarks Be careful with what we call here, since most of the VMM components
224 * are uninitialized.
225 */
226VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
227{
228 LogFlowFunc(("\n"));
229
230 /*
231 * Assert alignment and sizes.
232 */
233 AssertCompileMemberAlignment(VM, hm.s, 32);
234 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
235
236 /*
237 * Register the saved state data unit.
238 */
239 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
240 NULL, NULL, NULL,
241 NULL, hmR3Save, NULL,
242 NULL, hmR3Load, NULL);
243 if (RT_FAILURE(rc))
244 return rc;
245
246 /*
247 * Read configuration.
248 */
249 PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
250
251 /*
252 * Validate the HM settings.
253 */
254 rc = CFGMR3ValidateConfig(pCfgHm, "/HM/",
255 "HMForced" /* implied 'true' these days */
256 "|UseNEMInstead"
257 "|FallbackToNEM"
258 "|FallbackToIEM"
259 "|EnableNestedPaging"
260 "|EnableUX"
261 "|EnableLargePages"
262 "|EnableVPID"
263 "|IBPBOnVMExit"
264 "|IBPBOnVMEntry"
265 "|SpecCtrlByHost"
266 "|L1DFlushOnSched"
267 "|L1DFlushOnVMEntry"
268 "|MDSClearOnSched"
269 "|MDSClearOnVMEntry"
270 "|TPRPatchingEnabled"
271 "|64bitEnabled"
272 "|Exclusive"
273 "|MaxResumeLoops"
274 "|VmxPleGap"
275 "|VmxPleWindow"
276 "|VmxLbr"
277 "|UseVmxPreemptTimer"
278 "|SvmPauseFilter"
279 "|SvmPauseFilterThreshold"
280 "|SvmVirtVmsaveVmload"
281 "|SvmVGif"
282 "|LovelyMesaDrvWorkaround"
283 "|MissingOS2TlbFlushWorkaround",
284 "" /* pszValidNodes */, "HM" /* pszWho */, 0 /* uInstance */);
285 if (RT_FAILURE(rc))
286 return rc;
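/* Editorial sketch (not part of the original file): the /HM/ keys validated above are
 * normally created by the VM frontend before HMR3Init runs.  Assuming the standard CFGM
 * ring-3 API, a caller could populate a couple of them like this:
 */
#if 0
 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
 PCFGMNODE pHmCfg = NULL;
 int rc2 = CFGMR3InsertNode(pRoot, "HM", &pHmCfg); /* create the /HM/ subtree */
 if (RT_SUCCESS(rc2))
 rc2 = CFGMR3InsertInteger(pHmCfg, "FallbackToIEM", 1); /* allow falling back to IEM */
#endif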
287
288 /** @cfgm{/HM/HMForced, bool, false}
289 * Forces hardware virtualization, no falling back on raw-mode. HM must be
290 * enabled, i.e. /HMEnabled must be true. */
291 bool const fHMForced = true;
292#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
293 AssertRelease(pVM->fHMEnabled);
294#else
295 AssertRelease(!pVM->fHMEnabled);
296#endif
297
298 /** @cfgm{/HM/UseNEMInstead, bool, false}
299 * Don't use HM, use NEM instead. */
300 bool fUseNEMInstead = false;
301 rc = CFGMR3QueryBoolDef(pCfgHm, "UseNEMInstead", &fUseNEMInstead, false);
302 AssertRCReturn(rc, rc);
303 if (fUseNEMInstead && pVM->fHMEnabled)
304 {
305 LogRel(("HM: Setting fHMEnabled to false because fUseNEMInstead is set.\n"));
306 pVM->fHMEnabled = false;
307 }
308
309 /** @cfgm{/HM/FallbackToNEM, bool, true}
310 * Enables fallback on NEM. */
311 bool fFallbackToNEM = true;
312 rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToNEM", &fFallbackToNEM, true);
313 AssertRCReturn(rc, rc);
314
315 /** @cfgm{/HM/FallbackToIEM, bool, false on AMD64 else true }
316 * Enables fallback on IEM. */
317#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
318 bool fFallbackToIEM = false;
319#else
320 bool fFallbackToIEM = true;
321#endif
322 rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToIEM", &fFallbackToIEM, fFallbackToIEM);
323 AssertRCReturn(rc, rc);
324
325 /** @cfgm{/HM/EnableNestedPaging, bool, false}
326 * Enables nested paging (aka extended page tables). */
327 bool fAllowNestedPaging = false;
328 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableNestedPaging", &fAllowNestedPaging, false);
329 AssertRCReturn(rc, rc);
330
331 /** @cfgm{/HM/EnableUX, bool, true}
332 * Enables the VT-x unrestricted execution feature. */
333 bool fAllowUnrestricted = true;
334 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableUX", &fAllowUnrestricted, true);
335 AssertRCReturn(rc, rc);
336
337 /** @cfgm{/HM/EnableLargePages, bool, false}
338 * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
339 * page table walking and maybe better TLB hit rate in some cases. */
340 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableLargePages", &pVM->hm.s.fLargePages, false);
341 AssertRCReturn(rc, rc);
342
343 /** @cfgm{/HM/EnableVPID, bool, false}
344 * Enables the VT-x VPID feature. */
345 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
346 AssertRCReturn(rc, rc);
347
348 /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
349 * Enables TPR patching for 32-bit windows guests with IO-APIC. */
350 rc = CFGMR3QueryBoolDef(pCfgHm, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
351 AssertRCReturn(rc, rc);
352
353 /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
354 * Enables AMD64 CPU features.
355 * On 32-bit hosts this isn't the default and requires host CPU support; 64-bit
356 * hosts already have the support. */
357#ifdef VBOX_WITH_64_BITS_GUESTS
358 rc = CFGMR3QueryBoolDef(pCfgHm, "64bitEnabled", &pVM->hm.s.fAllow64BitGuestsCfg, HC_ARCH_BITS == 64);
359 AssertLogRelRCReturn(rc, rc);
360#else
361 pVM->hm.s.fAllow64BitGuestsCfg = false;
362#endif
363
364 /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
365 * The pause-filter exiting gap in TSC ticks. When the number of ticks between
366 * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
367 * latest PAUSE instruction to be start of a new PAUSE loop.
368 */
369 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
370 AssertRCReturn(rc, rc);
371
372 /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
373 * The pause-filter exiting window in TSC ticks. When the number of ticks
374 * between the current PAUSE instruction and first PAUSE of a loop exceeds
375 * VmxPleWindow, a VM-exit is triggered.
376 *
377 * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
378 */
379 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
380 AssertRCReturn(rc, rc);
381
382 /** @cfgm{/HM/VmxLbr, bool, false}
383 * Whether to enable LBR for the guest. This is disabled by default as it's only
384 * useful while debugging and enabling it causes a noticeable performance hit. */
385 rc = CFGMR3QueryBoolDef(pCfgHm, "VmxLbr", &pVM->hm.s.vmx.fLbrCfg, false);
386 AssertRCReturn(rc, rc);
387
388 /** @cfgm{/HM/SvmPauseFilterCount, uint16_t, 0}
389 * A counter that is decremented each time a PAUSE instruction is executed by the
390 * guest. When the counter is 0, a \#VMEXIT is triggered.
391 *
392 * Setting SvmPauseFilterCount to 0 disables pause-filter exiting.
393 */
394 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilter", &pVM->hm.s.svm.cPauseFilter, 0);
395 AssertRCReturn(rc, rc);
396
397 /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
398 * The pause filter threshold in ticks. When the elapsed time (in ticks) between
399 * two successive PAUSE instructions exceeds SvmPauseFilterThreshold, the
400 * PauseFilter count is reset to its initial value. However, if PAUSE is
401 * executed PauseFilter times within PauseFilterThreshold ticks, a VM-exit will
402 * be triggered.
403 *
404 * Requires SvmPauseFilterCount to be non-zero for pause-filter threshold to be
405 * activated.
406 */
407 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
408 AssertRCReturn(rc, rc);
409
410 /** @cfgm{/HM/SvmVirtVmsaveVmload, bool, true}
411 * Whether to make use of virtualized VMSAVE/VMLOAD feature of the CPU if it's
412 * available. */
413 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVirtVmsaveVmload", &pVM->hm.s.svm.fVirtVmsaveVmload, true);
414 AssertRCReturn(rc, rc);
415
416 /** @cfgm{/HM/SvmVGif, bool, true}
417 * Whether to make use of Virtual GIF (Global Interrupt Flag) feature of the CPU
418 * if it's available. */
419 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVGif", &pVM->hm.s.svm.fVGif, true);
420 AssertRCReturn(rc, rc);
421
422 /** @cfgm{/HM/SvmLbrVirt, bool, false}
423 * Whether to make use of the LBR virtualization feature of the CPU if it's
424 * available. This is disabled by default as it's only useful while debugging
425 * and enabling it causes a small hit to performance. */
426 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmLbrVirt", &pVM->hm.s.svm.fLbrVirt, false);
427 AssertRCReturn(rc, rc);
428
429 /** @cfgm{/HM/Exclusive, bool}
430 * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
431 * global init for each host CPU. If false, we do local init each time we wish
432 * to execute guest code.
433 *
434 * On Windows, default is false due to the higher risk of conflicts with other
435 * hypervisors.
436 *
437 * On Mac OS X, this setting is ignored since the code does not handle local
438 * init when it utilizes the OS provided VT-x function, SUPR0EnableVTx().
439 */
440#if defined(RT_OS_DARWIN)
441 pVM->hm.s.fGlobalInit = true;
442#else
443 rc = CFGMR3QueryBoolDef(pCfgHm, "Exclusive", &pVM->hm.s.fGlobalInit,
444# if defined(RT_OS_WINDOWS)
445 false
446# else
447 true
448# endif
449 );
450 AssertLogRelRCReturn(rc, rc);
451#endif
452
453 /** @cfgm{/HM/MaxResumeLoops, uint32_t}
454 * The number of times to resume guest execution before we forcibly return to
455 * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
456 * determines the default value. */
457 rc = CFGMR3QueryU32Def(pCfgHm, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoopsCfg, 0 /* set by R0 later */);
458 AssertLogRelRCReturn(rc, rc);
459
460 /** @cfgm{/HM/UseVmxPreemptTimer, bool}
461 * Whether to make use of the VMX-preemption timer feature of the CPU if it's
462 * available. */
463 rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimerCfg, true);
464 AssertLogRelRCReturn(rc, rc);
465
466 /** @cfgm{/HM/IBPBOnVMExit, bool}
467 * Costly paranoia setting. */
468 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
469 AssertLogRelRCReturn(rc, rc);
470
471 /** @cfgm{/HM/IBPBOnVMEntry, bool}
472 * Costly paranoia setting. */
473 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
474 AssertLogRelRCReturn(rc, rc);
475
476 /** @cfgm{/HM/L1DFlushOnSched, bool, true}
477 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
478 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
479 AssertLogRelRCReturn(rc, rc);
480
481 /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
482 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
483 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
484 AssertLogRelRCReturn(rc, rc);
485
486 /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
487 if (pVM->hm.s.fL1dFlushOnVmEntry)
488 pVM->hm.s.fL1dFlushOnSched = false;
489
490 /** @cfgm{/HM/SpecCtrlByHost, bool}
491 * Another expensive paranoia setting. */
492 rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
493 AssertLogRelRCReturn(rc, rc);
494
495 /** @cfgm{/HM/MDSClearOnSched, bool, true}
496 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
497 * ignored on CPUs that aren't affected. */
498 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
499 AssertLogRelRCReturn(rc, rc);
500
501 /** @cfgm{/HM/MDSClearOnVmEntry, bool, false}
502 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
503 * ignored on CPUs that aren't affected. */
504 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVmEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
505 AssertLogRelRCReturn(rc, rc);
506
507 /* Disable MDSClearOnSched if MDSClearOnVmEntry is enabled. */
508 if (pVM->hm.s.fMdsClearOnVmEntry)
509 pVM->hm.s.fMdsClearOnSched = false;
510
511 /** @cfgm{/HM/LovelyMesaDrvWorkaround,bool}
512 * Workaround for mesa vmsvga 3d driver making incorrect assumptions about
513 * the hypervisor it is running under. */
514 bool fMesaWorkaround;
515 rc = CFGMR3QueryBoolDef(pCfgHm, "LovelyMesaDrvWorkaround", &fMesaWorkaround, false);
516 AssertLogRelRCReturn(rc, rc);
517 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
518 {
519 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
520 pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv = fMesaWorkaround;
521 }
522
523 /** @cfgm{/HM/MissingOS2TlbFlushWorkaround,bool}
524 * Workaround for OS/2 not flushing the TLB after page directory and page table
525 * modifications when returning to protected mode from a real mode call
526 * (TESTCFG.SYS typically crashes). See ticketref:20625 for details. */
527 rc = CFGMR3QueryBoolDef(pCfgHm, "MissingOS2TlbFlushWorkaround", &pVM->hm.s.fMissingOS2TlbFlushWorkaround, false);
528 AssertLogRelRCReturn(rc, rc);
529
530 /*
531 * Check for VT-x or AMD-V support according to the user's wishes.
532 */
533 /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
534 * VERR_SVM_IN_USE. */
535 if (pVM->fHMEnabled)
536 {
537 uint32_t fCaps;
538 rc = SUPR3QueryVTCaps(&fCaps);
539 if (RT_SUCCESS(rc))
540 {
541 if (fCaps & SUPVTCAPS_AMD_V)
542 {
543 pVM->hm.s.svm.fSupported = true;
544 LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
545 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
546 }
547 else if (fCaps & SUPVTCAPS_VT_X)
548 {
549 const char *pszWhy;
550 rc = SUPR3QueryVTxSupported(&pszWhy);
551 if (RT_SUCCESS(rc))
552 {
553 pVM->hm.s.vmx.fSupported = true;
554 LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
555 fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
556 fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST ? " and unrestricted guest execution" : "",
557 (fCaps & (SUPVTCAPS_NESTED_PAGING | SUPVTCAPS_VTX_UNRESTRICTED_GUEST)) ? " hw support" : ""));
558 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
559 }
560 else
561 {
562 /*
563 * Before failing, try fallback to NEM if we're allowed to do that.
564 */
565 pVM->fHMEnabled = false;
566 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
567 if (fFallbackToNEM)
568 {
569 LogRel(("HM: HMR3Init: Attempting fall back to NEM: The host kernel does not support VT-x - %s\n", pszWhy));
570 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
571
572 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
573 if ( RT_SUCCESS(rc2)
574 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
575 rc = VINF_SUCCESS;
576 }
577 if (RT_FAILURE(rc))
578 return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x: %s\n", pszWhy);
579 }
580 }
581 else
582 AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
583 VERR_INTERNAL_ERROR_5);
584
585 /*
586 * Disable nested paging and unrestricted guest execution now if they're
587 * configured so that CPUM can make decisions based on our configuration.
588 */
589 if ( fAllowNestedPaging
590 && (fCaps & SUPVTCAPS_NESTED_PAGING))
591 {
592 pVM->hm.s.fNestedPagingCfg = true;
593 if (fCaps & SUPVTCAPS_VT_X)
594 {
595 if ( fAllowUnrestricted
596 && (fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST))
597 pVM->hm.s.vmx.fUnrestrictedGuestCfg = true;
598 else
599 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
600 }
601 }
602 else
603 Assert(!pVM->hm.s.fNestedPagingCfg);
604 }
605 else
606 {
607 const char *pszMsg;
608 switch (rc)
609 {
610 case VERR_UNSUPPORTED_CPU: pszMsg = "Unknown CPU, VT-x or AMD-v features cannot be ascertained"; break;
611 case VERR_VMX_NO_VMX: pszMsg = "VT-x is not available"; break;
612 case VERR_VMX_MSR_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS"; break;
613 case VERR_VMX_MSR_ALL_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS for all CPU modes"; break;
614 case VERR_VMX_MSR_LOCKING_FAILED: pszMsg = "Failed to enable and lock VT-x features"; break;
615 case VERR_SVM_NO_SVM: pszMsg = "AMD-V is not available"; break;
616 case VERR_SVM_DISABLED: pszMsg = "AMD-V is disabled in the BIOS (or by the host OS)"; break;
617 case VERR_SUP_DRIVERLESS: pszMsg = "Driverless mode"; break;
618 default:
619 return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
620 }
621
622 /*
623 * Before failing, try fallback to NEM if we're allowed to do that.
624 */
625 pVM->fHMEnabled = false;
626 if (fFallbackToNEM)
627 {
628 LogRel(("HM: HMR3Init: Attempting fall back to NEM: %s\n", pszMsg));
629 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
630 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
631 if ( RT_SUCCESS(rc2)
632 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
633 {
634 rc = VINF_SUCCESS;
635
636 /* For some reason, HM is in charge of large pages. Make sure to enable them: */
637 PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
638 }
639 }
640
641 /*
642 * Then try to fall back on IEM if NEM isn't available and we're allowed to.
643 */
644 if (RT_FAILURE(rc))
645 {
646 if ( fFallbackToIEM
647 && (!fFallbackToNEM || rc == VERR_NEM_NOT_AVAILABLE || rc == VERR_SUP_DRIVERLESS))
648 {
649 LogRel(("HM: HMR3Init: Falling back on IEM: %s\n", !fFallbackToNEM ? pszMsg : "NEM not available"));
650 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_IEM);
651#ifdef VBOX_WITH_PGM_NEM_MODE
652 PGMR3EnableNemMode(pVM);
653#endif
654 }
655 else
656 return VM_SET_ERROR(pVM, rc, pszMsg);
657 }
658 }
659 }
660 else
661 {
662 /*
663 * Disabled HM means raw-mode, unless NEM is supposed to be used.
664 */
665 rc = VERR_NEM_NOT_AVAILABLE;
666 if (fUseNEMInstead)
667 {
668 rc = NEMR3Init(pVM, false /*fFallback*/, true);
669 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
670 if (RT_SUCCESS(rc))
671 {
672 /* For some reason, HM is in charge of large pages. Make sure to enable them: */
673 PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
674 }
675 else if (!fFallbackToIEM || rc != VERR_NEM_NOT_AVAILABLE)
676 return rc;
677 }
678
679 if (fFallbackToIEM && rc == VERR_NEM_NOT_AVAILABLE)
680 {
681 LogRel(("HM: HMR3Init: Falling back on IEM%s\n", fUseNEMInstead ? ": NEM not available" : ""));
682 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_IEM);
683#ifdef VBOX_WITH_PGM_NEM_MODE
684 PGMR3EnableNemMode(pVM);
685#endif
686 }
687
688 if ( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET
689 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT /* paranoia */)
690 return VM_SET_ERROR(pVM, rc, "Misconfigured VM: No guest execution engine available!");
691 }
692
693 if (pVM->fHMEnabled)
694 {
695 /*
696 * Register info handlers now that HM is used for sure.
697 */
698 rc = DBGFR3InfoRegisterInternalEx(pVM, "hm", "Dumps HM info.", hmR3Info, DBGFINFO_FLAGS_ALL_EMTS);
699 AssertRCReturn(rc, rc);
700
701 rc = DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending,
702 DBGFINFO_FLAGS_ALL_EMTS);
703 AssertRCReturn(rc, rc);
704
705 rc = DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache", "Dumps the HM SVM nested-guest VMCB cache.",
706 hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
707 AssertRCReturn(rc, rc);
708
709 rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the HM LBR info.", hmR3InfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
710 AssertRCReturn(rc, rc);
711 }
712
713 Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
714 return VINF_SUCCESS;
715}
716
717
718/**
719 * Initializes HM components after ring-3 phase has been fully initialized.
720 *
721 * @returns VBox status code.
722 * @param pVM The cross context VM structure.
723 */
724static int hmR3InitFinalizeR3(PVM pVM)
725{
726 LogFlowFunc(("\n"));
727
728 if (!HMIsEnabled(pVM))
729 return VINF_SUCCESS;
730
731 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
732 {
733 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
734 pVCpu->hm.s.fActive = false;
735 pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); /* Is safe to call now since GIMR3Init() has completed. */
736 pVCpu->hm.s.fGCMTrapXcptDE = GCMShouldTrapXcptDE(pVCpu); /* Is safe to call now since GCMR3Init() has completed. */
737 }
738
739#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
740 /*
741 * Check if L1D flush is needed/possible.
742 */
743 if ( !g_CpumHostFeatures.s.fFlushCmd
744 || g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
745 || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
746 || g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d
747 || g_CpumHostFeatures.s.fArchRdclNo)
748 pVM->hm.s.fL1dFlushOnSched = pVM->hm.s.fL1dFlushOnVmEntry = false;
749
750 /*
751 * Check if MDS flush is needed/possible.
752 * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
753 */
754 if ( !g_CpumHostFeatures.s.fMdsClear
755 || g_CpumHostFeatures.s.fArchMdsNo)
756 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
757 else if ( ( g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
758 && g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Atom_End)
759 || ( g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
760 && g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Phi_End))
761 {
762 if (!pVM->hm.s.fMdsClearOnSched)
763 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
764 pVM->hm.s.fMdsClearOnVmEntry = false;
765 }
766 else if ( g_CpumHostFeatures.s.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
767 || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
768 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
769#endif
770
771 /*
772 * Statistics.
773 */
774#ifdef VBOX_WITH_STATISTICS
775 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
776 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
777 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessCr8, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessCR8", STAMUNIT_OCCURENCES, "Number of instruction replacements by MOV CR8.");
778 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessVmc, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessVMC", STAMUNIT_OCCURENCES, "Number of instruction replacements by VMMCALL.");
779 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
780#endif
781
782#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
783 bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
784#else
785 bool const fCpuSupportsVmx = false;
786#endif
787 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
788 {
789 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
790 PHMCPU pHmCpu = &pVCpu->hm.s;
791 int rc;
792
793# define HM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
794 rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
795 AssertRC(rc); \
796 } while (0)
797# define HM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
798 HM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
799
800#ifdef VBOX_WITH_STATISTICS
801 HM_REG_PROFILE(&pHmCpu->StatPoke, "/PROF/CPU%u/HM/Poke", "Profiling of RTMpPokeCpu.");
802 HM_REG_PROFILE(&pHmCpu->StatSpinPoke, "/PROF/CPU%u/HM/PokeWait", "Profiling of poke wait.");
803 HM_REG_PROFILE(&pHmCpu->StatSpinPokeFailed, "/PROF/CPU%u/HM/PokeWaitFailed", "Profiling of poke wait when RTMpPokeCpu fails.");
804 HM_REG_PROFILE(&pHmCpu->StatEntry, "/PROF/CPU%u/HM/Entry", "Profiling of entry until entering GC.");
805 HM_REG_PROFILE(&pHmCpu->StatPreExit, "/PROF/CPU%u/HM/SwitchFromGC_1", "Profiling of pre-exit processing after returning from GC.");
806 HM_REG_PROFILE(&pHmCpu->StatExitHandling, "/PROF/CPU%u/HM/SwitchFromGC_2", "Profiling of exit handling (longjmps not included!)");
807 HM_REG_PROFILE(&pHmCpu->StatExitIO, "/PROF/CPU%u/HM/SwitchFromGC_2/IO", "I/O.");
808 HM_REG_PROFILE(&pHmCpu->StatExitMovCRx, "/PROF/CPU%u/HM/SwitchFromGC_2/MovCRx", "MOV CRx.");
809 HM_REG_PROFILE(&pHmCpu->StatExitXcptNmi, "/PROF/CPU%u/HM/SwitchFromGC_2/XcptNmi", "Exceptions, NMIs.");
810 HM_REG_PROFILE(&pHmCpu->StatExitVmentry, "/PROF/CPU%u/HM/SwitchFromGC_2/Vmentry", "VMLAUNCH/VMRESUME on Intel or VMRUN on AMD.");
811 HM_REG_PROFILE(&pHmCpu->StatImportGuestState, "/PROF/CPU%u/HM/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
812 HM_REG_PROFILE(&pHmCpu->StatExportGuestState, "/PROF/CPU%u/HM/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
813 HM_REG_PROFILE(&pHmCpu->StatLoadGuestFpuState, "/PROF/CPU%u/HM/LoadGuestFpuState", "Profiling of CPUMR0LoadGuestFPU.");
814 HM_REG_PROFILE(&pHmCpu->StatInGC, "/PROF/CPU%u/HM/InGC", "Profiling of execution of guest-code in hardware.");
815# ifdef HM_PROFILE_EXIT_DISPATCH
816 HM_REG_STAT(&pHmCpu->StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
817 "/PROF/CPU%u/HM/ExitDispatch", "Profiling the dispatching of exit handlers.");
818# endif
819#endif
820# define HM_REG_COUNTER(a, b, desc) HM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
821
822 HM_REG_COUNTER(&pHmCpu->StatImportGuestStateFallback, "/HM/CPU%u/ImportGuestStateFallback", "Times vmxHCImportGuestState took the fallback code path.");
823 HM_REG_COUNTER(&pHmCpu->StatReadToTransientFallback, "/HM/CPU%u/ReadToTransientFallback", "Times vmxHCReadToTransient took the fallback code path.");
824#ifdef VBOX_WITH_STATISTICS
825 HM_REG_COUNTER(&pHmCpu->StatExitAll, "/HM/CPU%u/Exit/All", "Total exits (excludes nested-guest and debug loops exits).");
826 HM_REG_COUNTER(&pHmCpu->StatDebugExitAll, "/HM/CPU%u/Exit/DebugAll", "Total debug-loop exits.");
827 HM_REG_COUNTER(&pHmCpu->StatNestedExitAll, "/HM/CPU%u/Exit/NestedGuest/All", "Total nested-guest exits.");
828 HM_REG_COUNTER(&pHmCpu->StatExitShadowNM, "/HM/CPU%u/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
829 HM_REG_COUNTER(&pHmCpu->StatExitGuestNM, "/HM/CPU%u/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
830 HM_REG_COUNTER(&pHmCpu->StatExitShadowPF, "/HM/CPU%u/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
831 HM_REG_COUNTER(&pHmCpu->StatExitShadowPFEM, "/HM/CPU%u/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
832 HM_REG_COUNTER(&pHmCpu->StatExitGuestPF, "/HM/CPU%u/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
833 HM_REG_COUNTER(&pHmCpu->StatExitGuestUD, "/HM/CPU%u/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
834 HM_REG_COUNTER(&pHmCpu->StatExitGuestSS, "/HM/CPU%u/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
835 HM_REG_COUNTER(&pHmCpu->StatExitGuestNP, "/HM/CPU%u/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
836 HM_REG_COUNTER(&pHmCpu->StatExitGuestTS, "/HM/CPU%u/Exit/Trap/Gst/#TS", "Guest #TS (task switch) exception.");
837 HM_REG_COUNTER(&pHmCpu->StatExitGuestOF, "/HM/CPU%u/Exit/Trap/Gst/#OF", "Guest #OF (overflow) exception.");
838 HM_REG_COUNTER(&pHmCpu->StatExitGuestGP, "/HM/CPU%u/Exit/Trap/Gst/#GP", "Guest #GP (general protection) exception.");
839 HM_REG_COUNTER(&pHmCpu->StatExitGuestDE, "/HM/CPU%u/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
840 HM_REG_COUNTER(&pHmCpu->StatExitGuestDF, "/HM/CPU%u/Exit/Trap/Gst/#DF", "Guest #DF (double fault) exception.");
841 HM_REG_COUNTER(&pHmCpu->StatExitGuestBR, "/HM/CPU%u/Exit/Trap/Gst/#BR", "Guest #BR (boundary range exceeded) exception.");
842#endif
843 HM_REG_COUNTER(&pHmCpu->StatExitGuestAC, "/HM/CPU%u/Exit/Trap/Gst/#AC", "Guest #AC (alignment check) exception.");
844 if (fCpuSupportsVmx)
845 HM_REG_COUNTER(&pHmCpu->StatExitGuestACSplitLock, "/HM/CPU%u/Exit/Trap/Gst/#AC-split-lock", "Guest triggered #AC due to split-lock being enabled on the host (interpreted).");
846#ifdef VBOX_WITH_STATISTICS
847 HM_REG_COUNTER(&pHmCpu->StatExitGuestDB, "/HM/CPU%u/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
848 HM_REG_COUNTER(&pHmCpu->StatExitGuestMF, "/HM/CPU%u/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
849 HM_REG_COUNTER(&pHmCpu->StatExitGuestBP, "/HM/CPU%u/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
850 HM_REG_COUNTER(&pHmCpu->StatExitGuestXF, "/HM/CPU%u/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
851 HM_REG_COUNTER(&pHmCpu->StatExitGuestXcpUnk, "/HM/CPU%u/Exit/Trap/Gst/Other", "Other guest exceptions.");
852 HM_REG_COUNTER(&pHmCpu->StatExitRdmsr, "/HM/CPU%u/Exit/Instr/Rdmsr", "MSR read.");
853 HM_REG_COUNTER(&pHmCpu->StatExitWrmsr, "/HM/CPU%u/Exit/Instr/Wrmsr", "MSR write.");
854 HM_REG_COUNTER(&pHmCpu->StatExitDRxWrite, "/HM/CPU%u/Exit/Instr/DR-Write", "Debug register write.");
855 HM_REG_COUNTER(&pHmCpu->StatExitDRxRead, "/HM/CPU%u/Exit/Instr/DR-Read", "Debug register read.");
856 HM_REG_COUNTER(&pHmCpu->StatExitCR0Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
857 HM_REG_COUNTER(&pHmCpu->StatExitCR2Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
858 HM_REG_COUNTER(&pHmCpu->StatExitCR3Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
859 HM_REG_COUNTER(&pHmCpu->StatExitCR4Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
860 HM_REG_COUNTER(&pHmCpu->StatExitCR8Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
861 HM_REG_COUNTER(&pHmCpu->StatExitCR0Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
862 HM_REG_COUNTER(&pHmCpu->StatExitCR2Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
863 HM_REG_COUNTER(&pHmCpu->StatExitCR3Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
864 HM_REG_COUNTER(&pHmCpu->StatExitCR4Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
865 HM_REG_COUNTER(&pHmCpu->StatExitCR8Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
866 HM_REG_COUNTER(&pHmCpu->StatExitClts, "/HM/CPU%u/Exit/Instr/CLTS", "CLTS instruction.");
867 HM_REG_COUNTER(&pHmCpu->StatExitLmsw, "/HM/CPU%u/Exit/Instr/LMSW", "LMSW instruction.");
868 HM_REG_COUNTER(&pHmCpu->StatExitXdtrAccess, "/HM/CPU%u/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
869 HM_REG_COUNTER(&pHmCpu->StatExitIOWrite, "/HM/CPU%u/Exit/Instr/IO/Write", "I/O write.");
870 HM_REG_COUNTER(&pHmCpu->StatExitIORead, "/HM/CPU%u/Exit/Instr/IO/Read", "I/O read.");
871 HM_REG_COUNTER(&pHmCpu->StatExitIOStringWrite, "/HM/CPU%u/Exit/Instr/IO/WriteString", "String I/O write.");
872 HM_REG_COUNTER(&pHmCpu->StatExitIOStringRead, "/HM/CPU%u/Exit/Instr/IO/ReadString", "String I/O read.");
873 HM_REG_COUNTER(&pHmCpu->StatExitIntWindow, "/HM/CPU%u/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
874 HM_REG_COUNTER(&pHmCpu->StatExitExtInt, "/HM/CPU%u/Exit/ExtInt", "Physical maskable interrupt (host).");
875#endif
876 HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC, "/HM/CPU%u/Exit/HostNmiInGC", "Host NMI received while in guest context.");
877 HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi, "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
878 HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
879#ifdef VBOX_WITH_STATISTICS
880 HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold, "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
881 HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch, "/HM/CPU%u/Exit/TaskSwitch", "Task switch caused through task gate in IDT.");
882 HM_REG_COUNTER(&pHmCpu->StatExitApicAccess, "/HM/CPU%u/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
883
884 HM_REG_COUNTER(&pHmCpu->StatSwitchTprMaskedIrq, "/HM/CPU%u/Switch/TprMaskedIrq", "PDMGetInterrupt() signals TPR masks pending Irq.");
885 HM_REG_COUNTER(&pHmCpu->StatSwitchGuestIrq, "/HM/CPU%u/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
886 HM_REG_COUNTER(&pHmCpu->StatSwitchPendingHostIrq, "/HM/CPU%u/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
887 HM_REG_COUNTER(&pHmCpu->StatSwitchHmToR3FF, "/HM/CPU%u/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
888 HM_REG_COUNTER(&pHmCpu->StatSwitchVmReq, "/HM/CPU%u/Switch/VmReq", "Exit to ring-3 due to pending VM requests.");
889 HM_REG_COUNTER(&pHmCpu->StatSwitchPgmPoolFlush, "/HM/CPU%u/Switch/PgmPoolFlush", "Exit to ring-3 due to pending PGM pool flush.");
890 HM_REG_COUNTER(&pHmCpu->StatSwitchDma, "/HM/CPU%u/Switch/PendingDma", "Exit to ring-3 due to pending DMA requests.");
891 HM_REG_COUNTER(&pHmCpu->StatSwitchExitToR3, "/HM/CPU%u/Switch/ExitToR3", "Exit to ring-3 (total).");
892 HM_REG_COUNTER(&pHmCpu->StatSwitchLongJmpToR3, "/HM/CPU%u/Switch/LongJmpToR3", "Longjump to ring-3.");
893 HM_REG_COUNTER(&pHmCpu->StatSwitchMaxResumeLoops, "/HM/CPU%u/Switch/MaxResumeLoops", "Maximum VMRESUME inner-loop counter reached.");
894 HM_REG_COUNTER(&pHmCpu->StatSwitchHltToR3, "/HM/CPU%u/Switch/HltToR3", "HLT causing us to go to ring-3.");
895 HM_REG_COUNTER(&pHmCpu->StatSwitchApicAccessToR3, "/HM/CPU%u/Switch/ApicAccessToR3", "APIC access causing us to go to ring-3.");
896#endif
897 HM_REG_COUNTER(&pHmCpu->StatSwitchPreempt, "/HM/CPU%u/Switch/Preempting", "EMT has been preempted while in HM context.");
898#ifdef VBOX_WITH_STATISTICS
899 HM_REG_COUNTER(&pHmCpu->StatSwitchNstGstVmexit, "/HM/CPU%u/Switch/NstGstVmexit", "Nested-guest VM-exit occurred.");
900
901 HM_REG_COUNTER(&pHmCpu->StatInjectInterrupt, "/HM/CPU%u/EventInject/Interrupt", "Injected an external interrupt into the guest.");
902 HM_REG_COUNTER(&pHmCpu->StatInjectXcpt, "/HM/CPU%u/EventInject/Trap", "Injected an exception into the guest.");
903 HM_REG_COUNTER(&pHmCpu->StatInjectReflect, "/HM/CPU%u/EventInject/Reflect", "Reflecting an exception caused due to event injection.");
904 HM_REG_COUNTER(&pHmCpu->StatInjectConvertDF, "/HM/CPU%u/EventInject/ReflectDF", "Injected a converted #DF caused due to event injection.");
905 HM_REG_COUNTER(&pHmCpu->StatInjectInterpret, "/HM/CPU%u/EventInject/Interpret", "Falling back to interpreter for handling exception caused due to event injection.");
906 HM_REG_COUNTER(&pHmCpu->StatInjectReflectNPF, "/HM/CPU%u/EventInject/ReflectNPF", "Reflecting event that caused an EPT violation / nested #PF.");
907
908 HM_REG_COUNTER(&pHmCpu->StatFlushPage, "/HM/CPU%u/Flush/Page", "Invalidating a guest page on all guest CPUs.");
909 HM_REG_COUNTER(&pHmCpu->StatFlushPageManual, "/HM/CPU%u/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
910 HM_REG_COUNTER(&pHmCpu->StatFlushPhysPageManual, "/HM/CPU%u/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
911 HM_REG_COUNTER(&pHmCpu->StatFlushTlb, "/HM/CPU%u/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
912 HM_REG_COUNTER(&pHmCpu->StatFlushTlbManual, "/HM/CPU%u/Flush/TLB/Manual", "Request a full guest-TLB flush.");
913 HM_REG_COUNTER(&pHmCpu->StatFlushTlbNstGst, "/HM/CPU%u/Flush/TLB/NestedGuest", "Request a nested-guest-TLB flush.");
914 HM_REG_COUNTER(&pHmCpu->StatFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
915 HM_REG_COUNTER(&pHmCpu->StatNoFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/Skipped", "No TLB flushing required.");
916 HM_REG_COUNTER(&pHmCpu->StatFlushEntire, "/HM/CPU%u/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
917 HM_REG_COUNTER(&pHmCpu->StatFlushAsid, "/HM/CPU%u/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
918 HM_REG_COUNTER(&pHmCpu->StatFlushNestedPaging, "/HM/CPU%u/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
919 HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgVirt, "/HM/CPU%u/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
920 HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgPhys, "/HM/CPU%u/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
921 HM_REG_COUNTER(&pHmCpu->StatTlbShootdown, "/HM/CPU%u/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
922 HM_REG_COUNTER(&pHmCpu->StatTlbShootdownFlush, "/HM/CPU%u/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
923
924 HM_REG_COUNTER(&pHmCpu->StatTscParavirt, "/HM/CPU%u/TSC/Paravirt", "Paravirtualized TSC in effect.");
925 HM_REG_COUNTER(&pHmCpu->StatTscOffset, "/HM/CPU%u/TSC/Offset", "TSC offsetting is in effect.");
926 HM_REG_COUNTER(&pHmCpu->StatTscIntercept, "/HM/CPU%u/TSC/Intercept", "Intercept TSC accesses.");
927
928 HM_REG_COUNTER(&pHmCpu->StatDRxArmed, "/HM/CPU%u/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
929 HM_REG_COUNTER(&pHmCpu->StatDRxContextSwitch, "/HM/CPU%u/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
930 HM_REG_COUNTER(&pHmCpu->StatDRxIoCheck, "/HM/CPU%u/Debug/IOCheck", "Checking for I/O breakpoint.");
931
932 HM_REG_COUNTER(&pHmCpu->StatExportMinimal, "/HM/CPU%u/Export/Minimal", "VM-entry exporting minimal guest-state.");
933 HM_REG_COUNTER(&pHmCpu->StatExportFull, "/HM/CPU%u/Export/Full", "VM-entry exporting the full guest-state.");
934 HM_REG_COUNTER(&pHmCpu->StatLoadGuestFpu, "/HM/CPU%u/Export/GuestFpu", "VM-entry loading the guest-FPU state.");
935 HM_REG_COUNTER(&pHmCpu->StatExportHostState, "/HM/CPU%u/Export/HostState", "VM-entry exporting host-state.");
936
937 if (fCpuSupportsVmx)
938 {
939 HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRip, "/HM/CPU%u/WriteHostRIP", "Number of VMX_VMCS_HOST_RIP instructions.");
940 HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRsp, "/HM/CPU%u/WriteHostRSP", "Number of VMX_VMCS_HOST_RSP instructions.");
941 HM_REG_COUNTER(&pHmCpu->StatVmxVmLaunch, "/HM/CPU%u/VMLaunch", "Number of VM-entries using VMLAUNCH.");
942 HM_REG_COUNTER(&pHmCpu->StatVmxVmResume, "/HM/CPU%u/VMResume", "Number of VM-entries using VMRESUME.");
943 }
944
945 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelBase, "/HM/CPU%u/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
946 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelLimit, "/HM/CPU%u/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
947 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelAttr, "/HM/CPU%u/VMXCheck/RMSelAttrs", "Could not use VMX due to unsuitable real-mode selector attributes.");
948
949 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelBase, "/HM/CPU%u/VMXCheck/V86SelBase", "Could not use VMX due to unsuitable v8086-mode selector base.");
950 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelLimit, "/HM/CPU%u/VMXCheck/V86SelLimit", "Could not use VMX due to unsuitable v8086-mode selector limit.");
951 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelAttr, "/HM/CPU%u/VMXCheck/V86SelAttrs", "Could not use VMX due to unsuitable v8086-mode selector attributes.");
952
953 HM_REG_COUNTER(&pHmCpu->StatVmxCheckRmOk, "/HM/CPU%u/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
954 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadSel, "/HM/CPU%u/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
955 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl, "/HM/CPU%u/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
956 HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk, "/HM/CPU%u/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
957#endif
958 if (fCpuSupportsVmx)
959 {
960 HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/PreemptTimer", "VMX-preemption timer fired.");
961 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadline, "/HM/CPU%u/PreemptTimer/ReusingDeadline", "VMX-preemption timer arming logic using previously calculated deadline");
962 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadlineExpired, "/HM/CPU%u/PreemptTimer/ReusingDeadlineExpired", "VMX-preemption timer arming logic found previous deadline already expired (ignored)");
963 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadline, "/HM/CPU%u/PreemptTimer/RecalcingDeadline", "VMX-preemption timer arming logic recalculating the deadline (slightly expensive)");
964 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadlineExpired, "/HM/CPU%u/PreemptTimer/RecalcingDeadlineExpired", "VMX-preemption timer arming logic found recalculated deadline expired (ignored)");
965 }
966#ifdef VBOX_WITH_STATISTICS
967 /*
968 * Guest Exit reason stats.
969 */
970 if (fCpuSupportsVmx)
971 {
972 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
973 {
974 const char *pszExitName = HMGetVmxExitName(j);
975 if (pszExitName)
976 {
977 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
978 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
979 AssertRCReturn(rc, rc);
980 }
981 }
982 }
983 else
984 {
985 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
986 {
987 const char *pszExitName = HMGetSvmExitName(j);
988 if (pszExitName)
989 {
990 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
991 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
992 AssertRC(rc);
993 }
994 }
995 }
996 HM_REG_COUNTER(&pHmCpu->StatExitReasonNpf, "/HM/CPU%u/Exit/Reason/#NPF", "Nested page faults");
997
998#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
999 /*
1000 * Nested-guest VM-exit reason stats.
1001 */
1002 if (fCpuSupportsVmx)
1003 {
1004 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
1005 {
1006 const char *pszExitName = HMGetVmxExitName(j);
1007 if (pszExitName)
1008 {
1009 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1010 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
1011 AssertRC(rc);
1012 }
1013 }
1014 }
1015 else
1016 {
1017 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
1018 {
1019 const char *pszExitName = HMGetSvmExitName(j);
1020 if (pszExitName)
1021 {
1022 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1023 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
1024 AssertRC(rc);
1025 }
1026 }
1027 }
1028 HM_REG_COUNTER(&pHmCpu->StatNestedExitReasonNpf, "/HM/CPU%u/Exit/NestedGuest/Reason/#NPF", "Nested page faults");
1029#endif
1030
1031 /*
1032 * Injected interrupts stats.
1033 */
1034 char szDesc[64];
1035 for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedIrqs); j++)
1036 {
1037 RTStrPrintf(&szDesc[0], sizeof(szDesc), "Interrupt %u", j);
1038 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1039 STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectIntr/%02X", idCpu, j);
1040 AssertRC(rc);
1041 }
1042
1043 /*
1044 * Injected exception stats.
1045 */
1046 for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedXcpts); j++)
1047 {
1048 RTStrPrintf(&szDesc[0], sizeof(szDesc), "%s exception", hmR3GetXcptName(j));
1049 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedXcpts[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
1050 STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectXcpt/%02X", idCpu, j);
1051 AssertRC(rc);
1052 }
1053
1054#endif /* VBOX_WITH_STATISTICS */
1055#undef HM_REG_COUNTER
1056#undef HM_REG_PROFILE
1057#undef HM_REG_STAT
1058 }
1059
1060 return VINF_SUCCESS;
1061}
1062
1063
1064/**
1065 * Called when a init phase has completed.
1066 *
1067 * @returns VBox status code.
1068 * @param pVM The cross context VM structure.
1069 * @param enmWhat The phase that completed.
1070 */
1071VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1072{
1073 switch (enmWhat)
1074 {
1075 case VMINITCOMPLETED_RING3:
1076 return hmR3InitFinalizeR3(pVM);
1077 case VMINITCOMPLETED_RING0:
1078 return hmR3InitFinalizeR0(pVM);
1079 default:
1080 return VINF_SUCCESS;
1081 }
1082}
1083
1084
1085/**
1086 * Turns off normal raw mode features.
1087 *
1088 * @param pVM The cross context VM structure.
1089 */
1090static void hmR3DisableRawMode(PVM pVM)
1091{
1092/** @todo r=bird: HM shouldn't be doing this crap. */
1093 /* Reinit the paging mode to force the new shadow mode. */
1094 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1095 {
1096 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1097 PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL, false /* fForce */);
1098 }
1099}
1100
1101
1102/**
1103 * Initialize VT-x or AMD-V.
1104 *
1105 * @returns VBox status code.
1106 * @param pVM The cross context VM structure.
1107 */
1108static int hmR3InitFinalizeR0(PVM pVM)
1109{
1110 int rc;
1111
1112 if (!HMIsEnabled(pVM))
1113 return VINF_SUCCESS;
1114
1115 /*
1116 * Hack to allow users to work around broken BIOSes that incorrectly set
1117 * EFER.SVME, which makes us believe somebody else is already using AMD-V.
1118 */
1119 if ( !pVM->hm.s.vmx.fSupported
1120 && !pVM->hm.s.svm.fSupported
1121 && pVM->hm.s.ForR3.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
1122 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
1123 {
1124 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
1125 pVM->hm.s.svm.fSupported = true;
1126 pVM->hm.s.svm.fIgnoreInUseError = true;
1127 pVM->hm.s.ForR3.rcInit = VINF_SUCCESS;
1128 }
1129
1130 /*
1131 * Report ring-0 init errors.
1132 */
1133 if ( !pVM->hm.s.vmx.fSupported
1134 && !pVM->hm.s.svm.fSupported)
1135 {
1136 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.ForR3.rcInit));
1137 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.ForR3.vmx.u64HostFeatCtrl));
1138 switch (pVM->hm.s.ForR3.rcInit)
1139 {
1140 case VERR_VMX_IN_VMX_ROOT_MODE:
1141 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor");
1142 case VERR_VMX_NO_VMX:
1143 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available");
1144 case VERR_VMX_MSR_VMX_DISABLED:
1145 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_DISABLED, "VT-x is disabled in the BIOS");
1146 case VERR_VMX_MSR_ALL_VMX_DISABLED:
1147 return VM_SET_ERROR(pVM, VERR_VMX_MSR_ALL_VMX_DISABLED, "VT-x is disabled in the BIOS for all CPU modes");
1148 case VERR_VMX_MSR_LOCKING_FAILED:
1149 return VM_SET_ERROR(pVM, VERR_VMX_MSR_LOCKING_FAILED, "Failed to lock VT-x features while trying to enable VT-x");
1150 case VERR_VMX_MSR_VMX_ENABLE_FAILED:
1151 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_ENABLE_FAILED, "Failed to enable VT-x features");
1152 case VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED:
1153 return VM_SET_ERROR(pVM, VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED, "Failed to enable VT-x features in SMX mode");
1154
1155 case VERR_SVM_IN_USE:
1156 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor");
1157 case VERR_SVM_NO_SVM:
1158 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available");
1159 case VERR_SVM_DISABLED:
1160 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
1161 }
1162 return VMSetError(pVM, pVM->hm.s.ForR3.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.ForR3.rcInit);
1163 }
1164
1165 /*
1166 * Enable VT-x or AMD-V on all host CPUs.
1167 */
1168 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
1169 if (RT_FAILURE(rc))
1170 {
1171 LogRel(("HM: Failed to enable, error %Rrc\n", rc));
1172 HMR3CheckError(pVM, rc);
1173 return rc;
1174 }
1175
1176 /*
1177 * No TPR patching is required when the IO-APIC is not enabled for this VM.
1178 * (Main should have taken care of this already)
1179 */
1180 if (!PDMHasIoApic(pVM))
1181 {
1182 Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
1183 pVM->hm.s.fTprPatchingAllowed = false;
1184 }
1185
1186 LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
1187 pVM->hm.s.ForR3.fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
1188 pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
1189
1190 /*
1191 * Do the vendor specific initialization
1192 *
1193 * Note! We enable release log buffering here since we're doing a relatively
1194 * large amount of logging and don't want to hit the disk with each LogRel
1195 * statement.
1196 */
1197 AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
1198 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
1199 if (pVM->hm.s.vmx.fSupported)
1200 rc = hmR3InitFinalizeR0Intel(pVM);
1201 else
1202 rc = hmR3InitFinalizeR0Amd(pVM);
1203 LogRel((pVM->hm.s.fGlobalInit ? "HM: VT-x/AMD-V init method: Global\n"
1204 : "HM: VT-x/AMD-V init method: Local\n"));
1205 RTLogRelSetBuffering(fOldBuffered);
1206 pVM->hm.s.fInitialized = true;
1207
1208 return rc;
1209}
1210
1211
1212/**
1213 * @callback_method_impl{FNPDMVMMDEVHEAPNOTIFY}
1214 */
1215static DECLCALLBACK(void) hmR3VmmDevHeapNotify(PVM pVM, void *pvAllocation, RTGCPHYS GCPhysAllocation)
1216{
1217 NOREF(pVM);
1218 NOREF(pvAllocation);
1219 NOREF(GCPhysAllocation);
1220}
1221
1222
1223/**
1224 * Returns a description of the VMCS (and associated regions') memory type given the
1225 * IA32_VMX_BASIC MSR.
1226 *
1227 * @returns The descriptive memory type.
1228 * @param uMsrVmxBasic IA32_VMX_BASIC MSR value.
1229 */
1230static const char *hmR3VmxGetMemTypeDesc(uint64_t uMsrVmxBasic)
1231{
1232 uint8_t const uMemType = RT_BF_GET(uMsrVmxBasic, VMX_BF_BASIC_VMCS_MEM_TYPE);
1233 switch (uMemType)
1234 {
1235 case VMX_BASIC_MEM_TYPE_WB: return "Write Back (WB)";
1236 case VMX_BASIC_MEM_TYPE_UC: return "Uncacheable (UC)";
1237 }
1238 return "Unknown";
1239}
1240
1241
1242/**
1243 * Returns a single-line description of all the activity-states supported by the CPU
1244 * given the IA32_VMX_MISC MSR.
1245 *
1246 * @returns All supported activity states.
1247 * @param uMsrMisc IA32_VMX_MISC MSR value.
1248 */
1249static const char *hmR3VmxGetActivityStateAllDesc(uint64_t uMsrMisc)
1250{
1251 static const char * const s_apszActStates[] =
1252 {
1253 "",
1254 " ( HLT )",
1255 " ( SHUTDOWN )",
1256 " ( HLT SHUTDOWN )",
1257 " ( SIPI_WAIT )",
1258 " ( HLT SIPI_WAIT )",
1259 " ( SHUTDOWN SIPI_WAIT )",
1260 " ( HLT SHUTDOWN SIPI_WAIT )"
1261 };
1262 uint8_t const idxActStates = RT_BF_GET(uMsrMisc, VMX_BF_MISC_ACTIVITY_STATES);
1263 Assert(idxActStates < RT_ELEMENTS(s_apszActStates));
1264 return s_apszActStates[idxActStates];
1265}
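/*
 * For reference (an illustrative note, not used by the code): the 3-bit
 * ACTIVITY_STATES field extracted above is a bitmask -- per the SDM, bit 0
 * stands for HLT, bit 1 for shutdown and bit 2 for wait-for-SIPI -- so the
 * value doubles as an index into s_apszActStates.  E.g. a field value of 5
 * (binary 101) means HLT and wait-for-SIPI are supported and selects
 * " ( HLT SIPI_WAIT )".
 */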
1266
1267
1268/**
1269 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
1270 *
1271 * @param fFeatMsr The feature control MSR value.
1272 */
1273static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
1274{
1275 uint64_t const val = fFeatMsr;
1276 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val));
1277 HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
1278 HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
1279 HMVMX_REPORT_MSR_CAP(val, "VMXON", MSR_IA32_FEATURE_CONTROL_VMXON);
1280 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN0", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_0);
1281 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN1", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_1);
1282 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN2", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_2);
1283 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN3", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_3);
1284 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN4", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_4);
1285 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN5", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_5);
1286 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN6", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_6);
1287 HMVMX_REPORT_MSR_CAP(val, "SENTER_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SENTER_GLOBAL_EN);
1288 HMVMX_REPORT_MSR_CAP(val, "SGX_LAUNCH_EN", MSR_IA32_FEATURE_CONTROL_SGX_LAUNCH_EN);
1289 HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
1290 HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE);
1291 if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
1292 LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
1293}
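/*
 * For reference (a sketch only; the authoritative check is done in ring-0
 * before VMXON): executing VMXON outside SMX operation requires both the
 * LOCK bit and the VMXON bit to be set in this MSR, i.e. roughly
 *
 *     (fFeatMsr & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
 *  ==            (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)
 *
 * A clear lock bit leaves the MSR writable, which typically means the
 * firmware never configured it -- hence the warning above.
 */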
1294
1295
1296/**
1297 * Reports MSR_IA32_VMX_BASIC MSR to the log.
1298 *
1299 * @param uBasicMsr The VMX basic MSR value.
1300 */
1301static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
1302{
1303 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr));
1304 LogRel(("HM: VMCS id = %#x\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_ID)));
1305 LogRel(("HM: VMCS size = %u bytes\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_SIZE)));
1306 LogRel(("HM: VMCS physical address limit = %s\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_PHYSADDR_WIDTH) ?
1307 "< 4 GB" : "None"));
1308 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr)));
1309 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_DUAL_MON)));
1310 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_INS_OUTS)));
1311 LogRel(("HM: Supports true-capability MSRs = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_TRUE_CTLS)));
1312 LogRel(("HM: VM-entry Xcpt error-code optional = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_XCPT_ERRCODE)));
1313}
1314
1315
1316/**
1317 * Reports MSR_IA32_PINBASED_CTLS to the log.
1318 *
1319 * @param pVmxMsr Pointer to the VMX MSR.
1320 */
1321static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1322{
1323 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1324 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1325 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
1326 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
1327 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
1328 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRTUAL_NMI", VMX_PIN_CTLS_VIRT_NMI);
1329 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PREEMPT_TIMER", VMX_PIN_CTLS_PREEMPT_TIMER);
1330 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "POSTED_INT", VMX_PIN_CTLS_POSTED_INT);
1331}
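/*
 * A reminder of the control-MSR convention the report functions rely on
 * (illustrative; the real constraint handling lives in ring-0): allowed0 is
 * the low dword (bits that must be 1) and allowed1 the high dword (bits that
 * may be 1).  A control value fCtls is acceptable roughly when
 *
 *        (fCtls & fAllowed0) == fAllowed0     // every "must be 1" bit is set
 *     && (fCtls & ~fAllowed1) == 0            // no "must be 0" bit is set
 *
 * which is why each report function below passes both masks to
 * HMVMX_REPORT_FEAT.
 */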
1332
1333
1334/**
1335 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
1336 *
1337 * @param pVmxMsr Pointer to the VMX MSR.
1338 */
1339static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1340{
1341 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1342 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1343 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
1344 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INT_WINDOW_EXIT", VMX_PROC_CTLS_INT_WINDOW_EXIT);
1345 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TSC_OFFSETTING", VMX_PROC_CTLS_USE_TSC_OFFSETTING);
1346 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HLT_EXIT", VMX_PROC_CTLS_HLT_EXIT);
1347 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVLPG_EXIT", VMX_PROC_CTLS_INVLPG_EXIT);
1348 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MWAIT_EXIT", VMX_PROC_CTLS_MWAIT_EXIT);
1349 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDPMC_EXIT", VMX_PROC_CTLS_RDPMC_EXIT);
1350 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSC_EXIT", VMX_PROC_CTLS_RDTSC_EXIT);
1351 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_LOAD_EXIT", VMX_PROC_CTLS_CR3_LOAD_EXIT);
1352 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_STORE_EXIT", VMX_PROC_CTLS_CR3_STORE_EXIT);
1353 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TERTIARY_CTLS", VMX_PROC_CTLS_USE_TERTIARY_CTLS);
1354 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_LOAD_EXIT", VMX_PROC_CTLS_CR8_LOAD_EXIT);
1355 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_STORE_EXIT", VMX_PROC_CTLS_CR8_STORE_EXIT);
1356 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TPR_SHADOW", VMX_PROC_CTLS_USE_TPR_SHADOW);
1357 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_WINDOW_EXIT", VMX_PROC_CTLS_NMI_WINDOW_EXIT);
1358 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MOV_DR_EXIT", VMX_PROC_CTLS_MOV_DR_EXIT);
1359 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNCOND_IO_EXIT", VMX_PROC_CTLS_UNCOND_IO_EXIT);
1360 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_IO_BITMAPS", VMX_PROC_CTLS_USE_IO_BITMAPS);
1361 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_TRAP_FLAG", VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
1362 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_MSR_BITMAPS", VMX_PROC_CTLS_USE_MSR_BITMAPS);
1363 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_EXIT", VMX_PROC_CTLS_MONITOR_EXIT);
1364 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_EXIT", VMX_PROC_CTLS_PAUSE_EXIT);
1365 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_SECONDARY_CTLS", VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1366}
1367
1368
1369/**
1370 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
1371 *
1372 * @param pVmxMsr Pointer to the VMX MSR.
1373 */
1374static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
1375{
1376 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1377 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1378 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
1379 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_APIC_ACCESS", VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
1380 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT", VMX_PROC_CTLS2_EPT);
1381 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DESC_TABLE_EXIT", VMX_PROC_CTLS2_DESC_TABLE_EXIT);
1382 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSCP", VMX_PROC_CTLS2_RDTSCP);
1383 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_X2APIC_MODE", VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
1384 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VPID", VMX_PROC_CTLS2_VPID);
1385 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "WBINVD_EXIT", VMX_PROC_CTLS2_WBINVD_EXIT);
1386 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNRESTRICTED_GUEST", VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1387 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "APIC_REG_VIRT", VMX_PROC_CTLS2_APIC_REG_VIRT);
1388 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_INT_DELIVERY", VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
1389 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_LOOP_EXIT", VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
1390 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDRAND_EXIT", VMX_PROC_CTLS2_RDRAND_EXIT);
1391 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVPCID", VMX_PROC_CTLS2_INVPCID);
1392 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMFUNC", VMX_PROC_CTLS2_VMFUNC);
1393 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMCS_SHADOWING", VMX_PROC_CTLS2_VMCS_SHADOWING);
1394 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLS_EXIT", VMX_PROC_CTLS2_ENCLS_EXIT);
1395 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDSEED_EXIT", VMX_PROC_CTLS2_RDSEED_EXIT);
1396 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PML", VMX_PROC_CTLS2_PML);
1397 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT_XCPT_VE", VMX_PROC_CTLS2_EPT_XCPT_VE);
1398 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
1399 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "XSAVES_XRSTORS", VMX_PROC_CTLS2_XSAVES_XRSTORS);
1400 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MODE_BASED_EPT_PERM", VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
1401 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SPP_EPT", VMX_PROC_CTLS2_SPP_EPT);
1402 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PT_EPT", VMX_PROC_CTLS2_PT_EPT);
1403 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "TSC_SCALING", VMX_PROC_CTLS2_TSC_SCALING);
1404 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USER_WAIT_PAUSE", VMX_PROC_CTLS2_USER_WAIT_PAUSE);
1405 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLV_EXIT", VMX_PROC_CTLS2_ENCLV_EXIT);
1406}
1407
1408
1409/**
1410 * Reports MSR_IA32_VMX_PROCBASED_CTLS3 MSR to the log.
1411 *
1412 * @param uProcCtls3 The tertiary processor-based VM-execution control MSR.
1413 */
1414static void hmR3VmxReportProcBasedCtls3Msr(uint64_t uProcCtls3)
1415{
1416 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS3 = %#RX64\n", uProcCtls3));
1417 LogRel(("HM: LOADIWKEY_EXIT = %RTbool\n", RT_BOOL(uProcCtls3 & VMX_PROC_CTLS3_LOADIWKEY_EXIT)));
1418}
1419
1420
1421/**
1422 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
1423 *
1424 * @param pVmxMsr Pointer to the VMX MSR.
1425 */
1426static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1427{
1428 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1429 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1430 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u));
1431 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_DEBUG", VMX_ENTRY_CTLS_LOAD_DEBUG);
1432 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "IA32E_MODE_GUEST", VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
1433 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENTRY_TO_SMM", VMX_ENTRY_CTLS_ENTRY_TO_SMM);
1434 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DEACTIVATE_DUAL_MON", VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
1435 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_ENTRY_CTLS_LOAD_PERF_MSR);
1436 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_ENTRY_CTLS_LOAD_PAT_MSR);
1437 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_ENTRY_CTLS_LOAD_EFER_MSR);
1438 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_BNDCFGS_MSR", VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR);
1439 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
1440 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_RTIT_CTL_MSR", VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR);
1441 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_ENTRY_CTLS_LOAD_CET_STATE);
1442 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_ENTRY_CTLS_LOAD_PKRS_MSR);
1443}
1444
1445
1446/**
1447 * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
1448 *
1449 * @param pVmxMsr Pointer to the VMX MSR.
1450 */
1451static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1452{
1453 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1454 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1455 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u));
1456 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_DEBUG", VMX_EXIT_CTLS_SAVE_DEBUG);
1457 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HOST_ADDR_SPACE_SIZE", VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1458 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_EXIT_CTLS_LOAD_PERF_MSR);
1459 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ACK_EXT_INT", VMX_EXIT_CTLS_ACK_EXT_INT);
1460 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PAT_MSR", VMX_EXIT_CTLS_SAVE_PAT_MSR);
1461 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_EXIT_CTLS_LOAD_PAT_MSR);
1462 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_EFER_MSR", VMX_EXIT_CTLS_SAVE_EFER_MSR);
1463 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_EXIT_CTLS_LOAD_EFER_MSR);
1464 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PREEMPT_TIMER", VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1465 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_BNDCFGS_MSR", VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR);
1466 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT);
1467 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_RTIT_CTL_MSR", VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR);
1468 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_EXIT_CTLS_LOAD_CET_STATE);
1469 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_EXIT_CTLS_LOAD_PKRS_MSR);
1470}
1471
1472
1473/**
1474 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
1475 *
1476 * @param fCaps The VMX EPT/VPID capability MSR value.
1477 */
1478static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
1479{
1480 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
1481 HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
1482 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
1483 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_5", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_5);
1484 HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_UC", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_UC);
1485 HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_WB", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB);
1486 HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
1487 HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
1488 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
1489 HMVMX_REPORT_MSR_CAP(fCaps, "ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY);
1490 HMVMX_REPORT_MSR_CAP(fCaps, "ADVEXITINFO_EPT_VIOLATION", MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION);
1491 HMVMX_REPORT_MSR_CAP(fCaps, "SUPER_SHW_STACK", MSR_IA32_VMX_EPT_VPID_CAP_SUPER_SHW_STACK);
1492 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
1493 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1494 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
1495 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1496 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
1497 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
1498 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
1499}
1500
1501
1502/**
1503 * Reports MSR_IA32_VMX_MISC MSR to the log.
1504 *
1505 * @param pVM Pointer to the VM.
1506 * @param fMisc The VMX misc. MSR value.
1507 */
1508static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
1509{
1510 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc));
1511 uint8_t const cPreemptTimerShift = RT_BF_GET(fMisc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
1512 if (cPreemptTimerShift == pVM->hm.s.vmx.cPreemptTimerShift)
1513 LogRel(("HM: PREEMPT_TIMER_TSC = %#x\n", cPreemptTimerShift));
1514 else
1515 {
1516 LogRel(("HM: PREEMPT_TIMER_TSC = %#x - erratum detected, using %#x instead\n", cPreemptTimerShift,
1517 pVM->hm.s.vmx.cPreemptTimerShift));
1518 }
1519 LogRel(("HM: EXIT_SAVE_EFER_LMA = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_EXIT_SAVE_EFER_LMA)));
1520 LogRel(("HM: ACTIVITY_STATES = %#x%s\n", RT_BF_GET(fMisc, VMX_BF_MISC_ACTIVITY_STATES),
1521 hmR3VmxGetActivityStateAllDesc(fMisc)));
1522 LogRel(("HM: INTEL_PT = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_INTEL_PT)));
1523 LogRel(("HM: SMM_READ_SMBASE_MSR = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_SMM_READ_SMBASE_MSR)));
1524 LogRel(("HM: CR3_TARGET = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_CR3_TARGET)));
1525 LogRel(("HM: MAX_MSR = %#x ( %u )\n", RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS),
1526 VMX_MISC_MAX_MSRS(fMisc)));
1527 LogRel(("HM: VMXOFF_BLOCK_SMI = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMXOFF_BLOCK_SMI)));
1528 LogRel(("HM: VMWRITE_ALL = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMWRITE_ALL)));
1529 LogRel(("HM: ENTRY_INJECT_SOFT_INT = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_ENTRY_INJECT_SOFT_INT)));
1530 LogRel(("HM: MSEG_ID = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_MSEG_ID)));
1531}
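/*
 * About PREEMPT_TIMER_TSC above (illustrative): the VMX-preemption timer
 * counts down each time bit N of the TSC toggles, N being the reported shift,
 * i.e. roughly once every 2^N TSC ticks.  With a shift of 5, for instance, a
 * timer value of 1000 expires after about 1000 * 32 = 32000 TSC ticks.
 */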
1532
1533
1534/**
1535 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
1536 *
1537 * @param uVmcsEnum The VMX VMCS enum MSR value.
1538 */
1539static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
1540{
1541 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", uVmcsEnum));
1542 LogRel(("HM: HIGHEST_IDX = %#x\n", RT_BF_GET(uVmcsEnum, VMX_BF_VMCS_ENUM_HIGHEST_IDX)));
1543}
1544
1545
1546/**
1547 * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
1548 *
1549 * @param uVmFunc The VMX VMFUNC MSR value.
1550 */
1551static void hmR3VmxReportVmFuncMsr(uint64_t uVmFunc)
1552{
1553 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc));
1554 HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", RT_BF_GET(uVmFunc, VMX_BF_VMFUNC_EPTP_SWITCHING));
1555}
1556
1557
1558/**
1559 * Reports VMX CR0, CR4 fixed MSRs.
1560 *
1561 * @param pMsrs Pointer to the VMX MSRs.
1562 */
1563static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
1564{
1565 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0));
1566 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1));
1567 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0));
1568 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1));
1569}
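/*
 * The fixed-bit MSRs above constrain CR0/CR4 while in VMX operation: bits set
 * in FIXED0 must be 1 and bits clear in FIXED1 must be 0.  A candidate value
 * is typically folded accordingly (sketch; the actual adjustment is done in
 * ring-0):
 *
 *     uCr0 = (uCr0 | pMsrs->u64Cr0Fixed0) & pMsrs->u64Cr0Fixed1;
 */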
1570
1571
1572/**
1573 * Finish VT-x initialization (after ring-0 init).
1574 *
1575 * @returns VBox status code.
1576 * @param pVM The cross context VM structure.
1577 */
1578static int hmR3InitFinalizeR0Intel(PVM pVM)
1579{
1580 int rc;
1581
1582 LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
1583 AssertLogRelReturn(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl != 0, VERR_HM_IPE_4);
1584
1585 LogRel(("HM: Using VT-x implementation 3.0\n"));
1586 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1587 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostCr4));
1588 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostMsrEfer));
1589 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl));
1590
1591 hmR3VmxReportFeatCtlMsr(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl);
1592 hmR3VmxReportBasicMsr(pVM->hm.s.ForR3.vmx.Msrs.u64Basic);
1593
1594 hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.PinCtls);
1595 hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls);
1596 if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1597 hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2);
1598 if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1599 hmR3VmxReportProcBasedCtls3Msr(pVM->hm.s.ForR3.vmx.Msrs.u64ProcCtls3);
1600
1601 hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.EntryCtls);
1602 hmR3VmxReportExitCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ExitCtls);
1603
1604 if (RT_BF_GET(pVM->hm.s.ForR3.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1605 {
1606 /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
1607 LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TruePinCtls));
1608 LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueProcCtls));
1609 LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueEntryCtls));
1610 LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueExitCtls));
1611 }
1612
1613 hmR3VmxReportMiscMsr(pVM, pVM->hm.s.ForR3.vmx.Msrs.u64Misc);
1614 hmR3VmxReportVmcsEnumMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmcsEnum);
1615 if (pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps)
1616 hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps);
1617 if (pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc)
1618 hmR3VmxReportVmFuncMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc);
1619 hmR3VmxReportCrFixedMsrs(&pVM->hm.s.ForR3.vmx.Msrs);
1620
1621#ifdef TODO_9217_VMCSINFO
1622 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1623 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1624 {
1625 PCVMXVMCSINFOSHARED pVmcsInfo = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfo;
1626 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
1627 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysVmcs));
1628 }
1629#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1630 if (pVM->cpum.ro.GuestFeatures.fVmx)
1631 {
1632 LogRel(("HM: Nested-guest:\n"));
1633 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1634 {
1635 PCVMXVMCSINFOSHARED pVmcsInfoNstGst = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfoNstGst;
1636 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysMsrBitmap));
1637 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysVmcs));
1638 }
1639 }
1640#endif
1641#endif /* TODO_9217_VMCSINFO */
1642
1643 /*
1644 * EPT and unrestricted guest execution are determined in HMR3Init, verify the sanity of that.
1645 */
1646 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1647 || (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
1648 VERR_HM_IPE_1);
1649 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuestCfg
1650 || ( (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
1651 && pVM->hm.s.fNestedPagingCfg),
1652 VERR_HM_IPE_1);
1653
1654 /*
1655      * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1656      * RDTSCP would cause a #UD. There are probably no CPUs out there where this happens, as RDTSCP was introduced
1657      * with Nehalem and secondary VM-execution controls should be supported on all of them, but nonetheless it's Intel...
1658 */
1659 if ( !(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1660 && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1661 {
1662 CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1663 LogRel(("HM: Disabled RDTSCP\n"));
1664 }
1665
1666 if (!pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1667 {
1668 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
1669 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, hmR3VmmDevHeapNotify, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1670 if (RT_SUCCESS(rc))
1671 {
1672 /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
1673 Refer Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
1674 esp. Figure 20-5.*/
1675 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1676 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1677
1678             /* A bit set to 0 means the software interrupt is redirected to the
1679                virtual-8086 program's interrupt handler rather than switching to the
1680                protected-mode handler. */
1681 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1682
1683             /* Allow all port IO in the TSS bitmap, so that port IO instructions do not
1684                cause exceptions but instead cause a VM-exit (based on VT-x's IO bitmap,
1685                which we currently configure to always cause an exit). */
1686 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, X86_PAGE_SIZE * 2);
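            /* Per the Intel SDM the I/O permission bit map must be terminated by a
               byte with all bits set; that is what the 0xff store below provides. */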
1687 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
1688
1689             /*
1690              * Construct a 1024-entry page directory with 4 MB pages, identity-mapping the address
1691              * space; it is used for real and protected mode without paging when EPT is active.
1692              */
1693 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + X86_PAGE_SIZE * 3);
1694 for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
1695 {
1696 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1697 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1698 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1699 | X86_PDE4M_G;
1700 }
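            /* Illustrative result (assuming the usual 32-bit PDE bit layout): entry 1
               becomes 0x00400000 | P | RW | US | A | D | PS | G = 0x004001e7, identity
               mapping the 4 MB region starting at 0x00400000. */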
1701
1702 /* We convert it here every time as PCI regions could be reconfigured. */
1703 if (PDMVmmDevHeapIsEnabled(pVM))
1704 {
1705 RTGCPHYS GCPhys;
1706 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1707 AssertRCReturn(rc, rc);
1708 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
1709
1710 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1711 AssertRCReturn(rc, rc);
1712 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
1713 }
1714 }
1715 else
1716 {
1717 LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1718 pVM->hm.s.vmx.pRealModeTSS = NULL;
1719 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1720 return VMSetError(pVM, rc, RT_SRC_POS,
1721 "HM failure: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)", rc);
1722 }
1723 }
1724
1725 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1726 : "HM: Guest support: 32-bit only\n"));
1727
1728 /*
1729 * Call ring-0 to set up the VM.
1730 */
1731 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
1732 if (rc != VINF_SUCCESS)
1733 {
1734 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1735 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1736 {
1737 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1738 LogRel(("HM: CPU[%u] Last instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
1739 LogRel(("HM: CPU[%u] HM error %#x (%u)\n", idCpu, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
1740 }
1741 HMR3CheckError(pVM, rc);
1742 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1743 }
1744
1745 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer));
1746 LogRel(("HM: Enabled VMX\n"));
1747 pVM->hm.s.vmx.fEnabled = true;
1748
1749 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1750
1751 /*
1752 * Log configuration details.
1753 */
1754 if (pVM->hm.s.fNestedPagingCfg)
1755 {
1756 LogRel(("HM: Enabled nested paging\n"));
1757 if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
1758 LogRel(("HM: EPT flush type = Single context\n"));
1759 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1760 LogRel(("HM: EPT flush type = All contexts\n"));
1761 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
1762 LogRel(("HM: EPT flush type = Not supported\n"));
1763 else
1764 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushEpt));
1765
1766 if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1767 LogRel(("HM: Enabled unrestricted guest execution\n"));
1768
1769 if (pVM->hm.s.fLargePages)
1770 {
1771 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1772 PGMSetLargePageUsage(pVM, true);
1773 LogRel(("HM: Enabled large page support\n"));
1774 }
1775 }
1776 else
1777 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
1778
1779 if (pVM->hm.s.ForR3.vmx.fVpid)
1780 {
1781 LogRel(("HM: Enabled VPID\n"));
1782 if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
1783 LogRel(("HM: VPID flush type = Individual addresses\n"));
1784 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
1785 LogRel(("HM: VPID flush type = Single context\n"));
1786 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1787 LogRel(("HM: VPID flush type = All contexts\n"));
1788 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1789 LogRel(("HM: VPID flush type = Single context retain globals\n"));
1790 else
1791 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushVpid));
1792 }
1793 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
1794 LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
1795
1796 if (pVM->hm.s.vmx.fUsePreemptTimerCfg)
1797 LogRel(("HM: Enabled VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
1798 else
1799 LogRel(("HM: Disabled VMX-preemption timer\n"));
1800
1801 if (pVM->hm.s.fVirtApicRegs)
1802 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1803
1804 if (pVM->hm.s.fPostedIntrs)
1805 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1806
1807 if (pVM->hm.s.ForR3.vmx.fUseVmcsShadowing)
1808 {
1809 bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.ForR3.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
1810 LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
1811 }
1812
1813 return VINF_SUCCESS;
1814}
1815
1816
1817/**
1818 * Finish AMD-V initialization (after ring-0 init).
1819 *
1820 * @returns VBox status code.
1821 * @param pVM The cross context VM structure.
1822 */
1823static int hmR3InitFinalizeR0Amd(PVM pVM)
1824{
1825 LogFunc(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1826
1827 LogRel(("HM: Using AMD-V implementation 2.0\n"));
1828
1829#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
1830 uint32_t u32Family;
1831 uint32_t u32Model;
1832 uint32_t u32Stepping;
1833 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
1834 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
1835#endif
1836 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1837 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.ForR3.svm.u64MsrHwcr));
1838 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.ForR3.svm.u32Rev));
1839 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.ForR3.uMaxAsid));
1840 LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.ForR3.svm.fFeatures));
1841
1842 /*
1843 * Enumerate AMD-V features.
1844 */
1845 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1846 {
1847#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
1848 HMSVM_REPORT_FEATURE("NESTED_PAGING", X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1849 HMSVM_REPORT_FEATURE("LBR_VIRT", X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1850 HMSVM_REPORT_FEATURE("SVM_LOCK", X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1851 HMSVM_REPORT_FEATURE("NRIP_SAVE", X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1852 HMSVM_REPORT_FEATURE("TSC_RATE_MSR", X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1853 HMSVM_REPORT_FEATURE("VMCB_CLEAN", X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1854 HMSVM_REPORT_FEATURE("FLUSH_BY_ASID", X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1855 HMSVM_REPORT_FEATURE("DECODE_ASSISTS", X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS),
1856 HMSVM_REPORT_FEATURE("PAUSE_FILTER", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1857 HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
1858 HMSVM_REPORT_FEATURE("AVIC", X86_CPUID_SVM_FEATURE_EDX_AVIC),
1859 HMSVM_REPORT_FEATURE("VIRT_VMSAVE_VMLOAD", X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD),
1860 HMSVM_REPORT_FEATURE("VGIF", X86_CPUID_SVM_FEATURE_EDX_VGIF),
1861 HMSVM_REPORT_FEATURE("GMET", X86_CPUID_SVM_FEATURE_EDX_GMET),
1862 HMSVM_REPORT_FEATURE("SSSCHECK", X86_CPUID_SVM_FEATURE_EDX_SSSCHECK),
1863 HMSVM_REPORT_FEATURE("SPEC_CTRL", X86_CPUID_SVM_FEATURE_EDX_SPEC_CTRL),
1864 HMSVM_REPORT_FEATURE("HOST_MCE_OVERRIDE", X86_CPUID_SVM_FEATURE_EDX_HOST_MCE_OVERRIDE),
1865 HMSVM_REPORT_FEATURE("TLBICTL", X86_CPUID_SVM_FEATURE_EDX_TLBICTL),
1866#undef HMSVM_REPORT_FEATURE
1867 };
1868
1869 uint32_t fSvmFeatures = pVM->hm.s.ForR3.svm.fFeatures;
1870 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1871 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1872 {
1873 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1874 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1875 }
1876 if (fSvmFeatures)
1877 for (unsigned iBit = 0; iBit < 32; iBit++)
1878 if (RT_BIT_32(iBit) & fSvmFeatures)
1879 LogRel(("HM: Reserved bit %u\n", iBit));
1880
1881 /*
1882 * Nested paging is determined in HMR3Init, verify the sanity of that.
1883 */
1884 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1885 || (pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1886 VERR_HM_IPE_1);
1887
1888#if 0
1889 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
1890 * here. */
1891 if (RTR0IsPostIpiSupport())
1892 pVM->hm.s.fPostedIntrs = true;
1893#endif
1894
1895 /*
1896      * Determine whether we need to intercept #UD in SVM mode for emulating
1897      * Intel SYSENTER/SYSEXIT on AMD64, as these instructions result in #UD
1898      * when executed in long mode.  This is only really applicable when
1899      * non-default CPU profiles are in effect, i.e. the guest vendor differs
1900      * from the host one.
1901 */
1902 if (CPUMGetGuestCpuVendor(pVM) != CPUMGetHostCpuVendor(pVM))
1903 switch (CPUMGetGuestCpuVendor(pVM))
1904 {
1905 case CPUMCPUVENDOR_INTEL:
1906 case CPUMCPUVENDOR_VIA: /*?*/
1907 case CPUMCPUVENDOR_SHANGHAI: /*?*/
1908 switch (CPUMGetHostCpuVendor(pVM))
1909 {
1910 case CPUMCPUVENDOR_AMD:
1911 case CPUMCPUVENDOR_HYGON:
1912 if (pVM->hm.s.fAllow64BitGuestsCfg)
1913 {
1914 LogRel(("HM: Intercepting #UD for emulating SYSENTER/SYSEXIT in long mode.\n"));
1915 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1916 pVM->apCpusR3[idCpu]->hm.s.svm.fEmulateLongModeSysEnterExit = true;
1917 }
1918 break;
1919 default: break;
1920 }
1921 default: break;
1922 }
1923
1924 /*
1925 * Call ring-0 to set up the VM.
1926 */
1927 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1928 if (rc != VINF_SUCCESS)
1929 {
1930 AssertMsgFailed(("%Rrc\n", rc));
1931 LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
1932 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1933 }
1934
1935 LogRel(("HM: Enabled SVM\n"));
1936 pVM->hm.s.svm.fEnabled = true;
1937
1938 if (pVM->hm.s.fNestedPagingCfg)
1939 {
1940 LogRel(("HM: Enabled nested paging\n"));
1941
1942 /*
1943 * Enable large pages (2 MB) if applicable.
1944 */
1945 if (pVM->hm.s.fLargePages)
1946 {
1947 PGMSetLargePageUsage(pVM, true);
1948 LogRel(("HM: Enabled large page support\n"));
1949 }
1950 }
1951
1952 if (pVM->hm.s.fVirtApicRegs)
1953 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1954
1955 if (pVM->hm.s.fPostedIntrs)
1956 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1957
1958 hmR3DisableRawMode(pVM);
1959
1960 LogRel((pVM->hm.s.fTprPatchingAllowed ? "HM: Enabled TPR patching\n"
1961 : "HM: Disabled TPR patching\n"));
1962
1963 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1964 : "HM: Guest support: 32-bit only\n"));
1965 return VINF_SUCCESS;
1966}
1967
1968
1969/**
1970 * Applies relocations to data and code managed by this
1971 * component.  This function will be called at init and
1972 * whenever the VMM needs to relocate itself inside the GC.
1973 *
1974 * @param pVM The cross context VM structure.
1975 */
1976VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1977{
1978 /* Fetch the current paging mode during the relocate callback during state loading. */
1979 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1980 {
1981 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1982 {
1983 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1984 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1985 }
1986 }
1987}
1988
1989
1990/**
1991 * Terminates the HM.
1992 *
1993 * Termination means cleaning up and freeing all resources;
1994 * the VM itself is, at this point, powered off or suspended.
1995 *
1996 * @returns VBox status code.
1997 * @param pVM The cross context VM structure.
1998 */
1999VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
2000{
2001 if (pVM->hm.s.vmx.pRealModeTSS)
2002 {
2003 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
2004 pVM->hm.s.vmx.pRealModeTSS = 0;
2005 }
2006 hmR3TermCPU(pVM);
2007     return VINF_SUCCESS;
2008}
2009
2010
2011/**
2012 * Terminates the per-VCPU HM.
2013 *
2014 * @returns VBox status code.
2015 * @param pVM The cross context VM structure.
2016 */
2017static int hmR3TermCPU(PVM pVM)
2018{
2019 RT_NOREF(pVM);
2020 return VINF_SUCCESS;
2021}
2022
2023
2024/**
2025 * Resets a virtual CPU.
2026 *
2027 * Used by HMR3Reset and CPU hot plugging.
2028 *
2029 * @param pVCpu The cross context virtual CPU structure to reset.
2030 */
2031VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
2032{
2033     /* Sync the entire state on ring-0 re-entry after a VM reset.  It's safe to
2034        reset the HM flags here since all other EMTs are in ring-3.  See VMR3Reset(). */
2035 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
2036
2037 pVCpu->hm.s.fActive = false;
2038 pVCpu->hm.s.Event.fPending = false;
2039 pVCpu->hm.s.vmx.u64GstMsrApicBase = 0;
2040 pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true;
2041#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2042 if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
2043 pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true;
2044#endif
2045}
2046
2047
2048/**
2049 * The VM is being reset.
2050 *
2051 * For the HM component this means that any GDT/LDT/TSS monitors
2052 * need to be removed.
2053 *
2054 * @param pVM The cross context VM structure.
2055 */
2056VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
2057{
2058 LogFlow(("HMR3Reset:\n"));
2059
2060 if (HMIsEnabled(pVM))
2061 hmR3DisableRawMode(pVM);
2062
2063 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2064 HMR3ResetCpu(pVM->apCpusR3[idCpu]);
2065
2066 /* Clear all patch information. */
2067 pVM->hm.s.pGuestPatchMem = 0;
2068 pVM->hm.s.pFreeGuestPatchMem = 0;
2069 pVM->hm.s.cbGuestPatchMem = 0;
2070 pVM->hm.s.cPatches = 0;
2071 pVM->hm.s.PatchTree = 0;
2072 pVM->hm.s.fTprPatchingActive = false;
2073 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
2074}
2075
2076
2077/**
2078 * Callback to remove TPR patches, restoring the original instructions.
2079 *
2080 * @returns VBox strict status code.
2081 * @param pVM The cross context VM structure.
2082 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2083 * @param pvUser The VCPU id of the original patch request (cast to void *).
2084 */
2085static DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
2086{
2087 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2088
2089     /* Only execute the handler on the VCPU on which the original patch request was issued. */
2090 if (pVCpu->idCpu != idCpu)
2091 return VINF_SUCCESS;
2092
2093 Log(("hmR3RemovePatches\n"));
2094 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2095 {
2096 uint8_t abInstr[15];
2097 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2098 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
2099 int rc;
2100
2101#ifdef LOG_ENABLED
2102 char szOutput[256];
2103 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2104 szOutput, sizeof(szOutput), NULL);
2105 if (RT_SUCCESS(rc))
2106 Log(("Patched instr: %s\n", szOutput));
2107#endif
2108
2109 /* Check if the instruction is still the same. */
2110 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
2111 if (rc != VINF_SUCCESS)
2112 {
2113             Log(("Patched code removed? (rc=%Rrc)\n", rc));
2114 continue; /* swapped out or otherwise removed; skip it. */
2115 }
2116
2117 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
2118 {
2119             Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
2120 continue; /* skip it. */
2121 }
2122
2123 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
2124 AssertRC(rc);
2125
2126#ifdef LOG_ENABLED
2127 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2128 szOutput, sizeof(szOutput), NULL);
2129 if (RT_SUCCESS(rc))
2130 Log(("Original instr: %s\n", szOutput));
2131#endif
2132 }
2133 pVM->hm.s.cPatches = 0;
2134 pVM->hm.s.PatchTree = 0;
2135 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
2136 pVM->hm.s.fTprPatchingActive = false;
2137 return VINF_SUCCESS;
2138}
2139
2140
2141/**
2142 * Worker for enabling patching in a VT-x/AMD-V guest.
2143 *
2144 * @returns VBox status code.
2145 * @param pVM The cross context VM structure.
2146 * @param idCpu VCPU to execute hmR3RemovePatches on.
2147 * @param pPatchMem Patch memory range.
2148 * @param cbPatchMem Size of the memory range.
2149 */
2150static DECLCALLBACK(int) hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
2151{
2152 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
2153 AssertRC(rc);
2154
2155 pVM->hm.s.pGuestPatchMem = pPatchMem;
2156 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
2157 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
2158 return VINF_SUCCESS;
2159}
2160
2161
2162/**
2163 * Enable patching in a VT-x/AMD-V guest
2164 *
2165 * @returns VBox status code.
2166 * @param pVM The cross context VM structure.
2167 * @param pPatchMem Patch memory range.
2168 * @param cbPatchMem Size of the memory range.
2169 */
2170VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2171{
2172 VM_ASSERT_EMT(pVM);
2173 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2174 if (pVM->cCpus > 1)
2175 {
2176 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
2177 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
2178 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2179 AssertRC(rc);
2180 return rc;
2181 }
2182 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2183}
2184
2185
2186/**
2187 * Disable patching in a VT-x/AMD-V guest.
2188 *
2189 * @returns VBox status code.
2190 * @param pVM The cross context VM structure.
2191 * @param pPatchMem Patch memory range.
2192 * @param cbPatchMem Size of the memory range.
2193 */
2194VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2195{
2196 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2197 RT_NOREF2(pPatchMem, cbPatchMem);
2198
2199 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
2200 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
2201
2202 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
2203 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
2204 (void *)(uintptr_t)VMMGetCpuId(pVM));
2205 AssertRC(rc);
2206
2207 pVM->hm.s.pGuestPatchMem = 0;
2208 pVM->hm.s.pFreeGuestPatchMem = 0;
2209 pVM->hm.s.cbGuestPatchMem = 0;
2210 pVM->hm.s.fTprPatchingActive = false;
2211 return VINF_SUCCESS;
2212}
2213
2214
2215/**
2216 * Callback to patch a TPR instruction (vmmcall or mov cr8).
2217 *
2218 * @returns VBox strict status code.
2219 * @param pVM The cross context VM structure.
2220 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2221 * @param pvUser User specified CPU context.
2222 *
2223 */
2224static DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2225{
2226 /*
2227      * Only execute the handler on the VCPU on which the original patch request
2228      * was issued.  (The other CPU(s) might not yet have switched to protected
2229 * mode, nor have the correct memory context.)
2230 */
2231 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2232 if (pVCpu->idCpu != idCpu)
2233 return VINF_SUCCESS;
2234
2235 /*
2236      * We're racing other VCPUs here, so don't try to patch the instruction twice
2237 * and make sure there is still room for our patch record.
2238 */
2239 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2240 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2241 if (pPatch)
2242 {
2243 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
2244 return VINF_SUCCESS;
2245 }
2246 uint32_t const idx = pVM->hm.s.cPatches;
2247 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2248 {
2249 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2250 return VINF_SUCCESS;
2251 }
2252 pPatch = &pVM->hm.s.aPatches[idx];
2253
2254 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2255
2256 /*
2257      * Disassemble the instruction and get cracking.
2258 */
2259 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
2260 DISCPUSTATE Dis;
2261 uint32_t cbOp;
2262 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2263 AssertRC(rc);
2264 if ( rc == VINF_SUCCESS
2265 && Dis.pCurInstr->uOpcode == OP_MOV
2266 && cbOp >= 3)
2267 {
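        /* 0F 01 D9 is the VMMCALL encoding; executing it causes a #VMEXIT which
           ring-0 intercepts so the patched TPR access can be emulated. */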
2268 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
2269
2270 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2271 AssertRC(rc);
2272
2273 pPatch->cbOp = cbOp;
2274
2275 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2276 {
2277 /* write. */
2278 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2279 {
2280 pPatch->enmType = HMTPRINSTR_WRITE_REG;
2281 pPatch->uSrcOperand = Dis.Param2.Base.idxGenReg;
2282 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", Dis.Param2.Base.idxGenReg));
2283 }
2284 else
2285 {
2286 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2287 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
2288 pPatch->uSrcOperand = Dis.Param2.uValue;
2289 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", Dis.Param2.uValue));
2290 }
2291 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2292 AssertRC(rc);
2293
2294 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2295 pPatch->cbNewOp = sizeof(s_abVMMCall);
2296 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2297 }
2298 else
2299 {
2300 /*
2301 * TPR Read.
2302 *
2303 * Found:
2304 * mov eax, dword [fffe0080] (5 bytes)
2305 * Check if next instruction is:
2306 * shr eax, 4
2307 */
2308 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2309
2310 uint8_t const idxMmioReg = Dis.Param1.Base.idxGenReg;
2311 uint8_t const cbOpMmio = cbOp;
2312 uint64_t const uSavedRip = pCtx->rip;
2313
2314 pCtx->rip += cbOp;
2315 rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2316 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
2317 pCtx->rip = uSavedRip;
2318
2319 if ( rc == VINF_SUCCESS
2320 && Dis.pCurInstr->uOpcode == OP_SHR
2321 && Dis.Param1.fUse == DISUSE_REG_GEN32
2322 && Dis.Param1.Base.idxGenReg == idxMmioReg
2323 && Dis.Param2.fUse == DISUSE_IMMEDIATE8
2324 && Dis.Param2.uValue == 4
2325 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
2326 {
2327 uint8_t abInstr[15];
2328
2329 /* Replacing the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction so as to
2330 access CR8 in 32-bit mode and not cause a #VMEXIT. */
2331 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
2332 AssertRC(rc);
2333
2334 pPatch->cbOp = cbOpMmio + cbOp;
2335
2336 /* 0xf0, 0x0f, 0x20, 0xc0 = mov eax, cr8 */
2337 abInstr[0] = 0xf0;
2338 abInstr[1] = 0x0f;
2339 abInstr[2] = 0x20;
2340 abInstr[3] = 0xc0 | Dis.Param1.Base.idxGenReg;
2341 for (unsigned i = 4; i < pPatch->cbOp; i++)
2342 abInstr[i] = 0x90; /* nop */
2343
2344 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
2345 AssertRC(rc);
2346
2347 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
2348 pPatch->cbNewOp = pPatch->cbOp;
2349 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessCr8);
2350
2351 Log(("Acceptable read/shr candidate!\n"));
2352 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2353 }
2354 else
2355 {
2356 pPatch->enmType = HMTPRINSTR_READ;
2357 pPatch->uDstOperand = idxMmioReg;
2358
2359 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2360 AssertRC(rc);
2361
2362 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2363 pPatch->cbNewOp = sizeof(s_abVMMCall);
2364 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2365 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2366 }
2367 }
2368
2369 pPatch->Core.Key = pCtx->eip;
2370 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2371 AssertRC(rc);
2372
2373 pVM->hm.s.cPatches++;
2374 return VINF_SUCCESS;
2375 }
2376
2377 /*
2378 * Save invalid patch, so we will not try again.
2379 */
2380 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2381 pPatch->Core.Key = pCtx->eip;
2382 pPatch->enmType = HMTPRINSTR_INVALID;
2383 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2384 AssertRC(rc);
2385 pVM->hm.s.cPatches++;
2386 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2387 return VINF_SUCCESS;
2388}
2389
2390
2391/**
2392 * Callback to patch a TPR instruction (jump to generated code).
2393 *
2394 * @returns VBox strict status code.
2395 * @param pVM The cross context VM structure.
2396 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2397 * @param pvUser User specified CPU context.
2398 *
2399 */
2400static DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2401{
2402 /*
2403      * Only execute the handler on the VCPU on which the original patch request
2404      * was issued.  (The other CPU(s) might not yet have switched to protected
2405 * mode, nor have the correct memory context.)
2406 */
2407 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2408 if (pVCpu->idCpu != idCpu)
2409 return VINF_SUCCESS;
2410
2411 /*
2412      * We're racing other VCPUs here, so don't try to patch the instruction twice
2413 * and make sure there is still room for our patch record.
2414 */
2415 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2416 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2417 if (pPatch)
2418 {
2419 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2420 return VINF_SUCCESS;
2421 }
2422 uint32_t const idx = pVM->hm.s.cPatches;
2423 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2424 {
2425 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2426 return VINF_SUCCESS;
2427 }
2428 pPatch = &pVM->hm.s.aPatches[idx];
2429
2430 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2431 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
2432
2433 /*
2434 * Disassemble the instruction and get cracking.
2435 */
2436 DISCPUSTATE Dis;
2437 uint32_t cbOp;
2438 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2439 AssertRC(rc);
2440 if ( rc == VINF_SUCCESS
2441 && Dis.pCurInstr->uOpcode == OP_MOV
2442 && cbOp >= 5)
2443 {
2444 uint8_t aPatch[64];
2445 uint32_t off = 0;
2446
2447 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2448 AssertRC(rc);
2449
2450 pPatch->cbOp = cbOp;
2451 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2452
2453 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2454 {
2455 /*
2456 * TPR write:
2457 *
2458 * push ECX [51]
2459 * push EDX [52]
2460 * push EAX [50]
2461 * xor EDX,EDX [31 D2]
2462 * mov EAX,EAX [89 C0]
2463 * or
2464 * mov EAX,0000000CCh [B8 CC 00 00 00]
2465 * mov ECX,0C0000082h [B9 82 00 00 C0]
2466 * wrmsr [0F 30]
2467 * pop EAX [58]
2468 * pop EDX [5A]
2469 * pop ECX [59]
2470 * jmp return_address [E9 return_address]
2471 */
2472 bool fUsesEax = (Dis.Param2.fUse == DISUSE_REG_GEN32 && Dis.Param2.Base.idxGenReg == DISGREG_EAX);
2473
2474 aPatch[off++] = 0x51; /* push ecx */
2475 aPatch[off++] = 0x52; /* push edx */
2476 if (!fUsesEax)
2477 aPatch[off++] = 0x50; /* push eax */
2478 aPatch[off++] = 0x31; /* xor edx, edx */
2479 aPatch[off++] = 0xd2;
2480 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2481 {
2482 if (!fUsesEax)
2483 {
2484 aPatch[off++] = 0x89; /* mov eax, src_reg */
2485 aPatch[off++] = MAKE_MODRM(3, Dis.Param2.Base.idxGenReg, DISGREG_EAX);
2486 }
2487 }
2488 else
2489 {
2490 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2491 aPatch[off++] = 0xb8; /* mov eax, immediate */
2492 *(uint32_t *)&aPatch[off] = Dis.Param2.uValue;
2493 off += sizeof(uint32_t);
2494 }
2495 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2496 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2497 off += sizeof(uint32_t);
2498
2499 aPatch[off++] = 0x0f; /* wrmsr */
2500 aPatch[off++] = 0x30;
2501 if (!fUsesEax)
2502 aPatch[off++] = 0x58; /* pop eax */
2503 aPatch[off++] = 0x5a; /* pop edx */
2504 aPatch[off++] = 0x59; /* pop ecx */
2505 }
2506 else
2507 {
2508 /*
2509 * TPR read:
2510 *
2511 * push ECX [51]
2512 * push EDX [52]
2513 * push EAX [50]
2514 * mov ECX,0C0000082h [B9 82 00 00 C0]
2515 * rdmsr [0F 32]
2516 * mov EAX,EAX [89 C0]
2517 * pop EAX [58]
2518 * pop EDX [5A]
2519 * pop ECX [59]
2520 * jmp return_address [E9 return_address]
2521 */
2522 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2523
2524 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2525 aPatch[off++] = 0x51; /* push ecx */
2526 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX )
2527 aPatch[off++] = 0x52; /* push edx */
2528 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2529 aPatch[off++] = 0x50; /* push eax */
2530
2531 aPatch[off++] = 0x31; /* xor edx, edx */
2532 aPatch[off++] = 0xd2;
2533
2534 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2535 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2536 off += sizeof(uint32_t);
2537
2538 aPatch[off++] = 0x0f; /* rdmsr */
2539 aPatch[off++] = 0x32;
2540
2541 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2542 {
2543 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2544 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, Dis.Param1.Base.idxGenReg);
2545 }
2546
2547 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2548 aPatch[off++] = 0x58; /* pop eax */
2549 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX )
2550 aPatch[off++] = 0x5a; /* pop edx */
2551 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2552 aPatch[off++] = 0x59; /* pop ecx */
2553 }
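        /* Standard rel32 jump: E9 is followed by a 32-bit displacement relative to
           the end of the instruction, so the value stored below is the original
           return address (eip + cbOp) minus the address just past the displacement
           in the patch buffer. */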
2554 aPatch[off++] = 0xe9; /* jmp return_address */
2555 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2556 off += sizeof(RTRCUINTPTR);
2557
2558 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2559 {
2560 /* Write new code to the patch buffer. */
2561 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2562 AssertRC(rc);
2563
2564#ifdef LOG_ENABLED
2565 uint32_t cbCurInstr;
2566 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2567 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2568 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2569 {
2570 char szOutput[256];
2571 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2572 szOutput, sizeof(szOutput), &cbCurInstr);
2573 if (RT_SUCCESS(rc))
2574 Log(("Patch instr %s\n", szOutput));
2575 else
2576 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2577 }
2578#endif
2579
2580 pPatch->aNewOpcode[0] = 0xE9;
2581 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2582
2583 /* Overwrite the TPR instruction with a jump. */
2584 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2585 AssertRC(rc);
2586
2587 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
2588
2589 pVM->hm.s.pFreeGuestPatchMem += off;
2590 pPatch->cbNewOp = 5;
2591
2592 pPatch->Core.Key = pCtx->eip;
2593 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2594 AssertRC(rc);
2595
2596 pVM->hm.s.cPatches++;
2597 pVM->hm.s.fTprPatchingActive = true;
2598 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2599 return VINF_SUCCESS;
2600 }
2601
2602 Log(("Ran out of space in our patch buffer!\n"));
2603 }
2604 else
2605 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2606
2607
2608 /*
2609 * Save invalid patch, so we will not try again.
2610 */
2611 pPatch = &pVM->hm.s.aPatches[idx];
2612 pPatch->Core.Key = pCtx->eip;
2613 pPatch->enmType = HMTPRINSTR_INVALID;
2614 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2615 AssertRC(rc);
2616 pVM->hm.s.cPatches++;
2617 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2618 return VINF_SUCCESS;
2619}
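/*
 * Editor's note (illustrative, not part of the original source): the patch bytes above are
 * hand-assembled, so two details are worth spelling out.  MAKE_MODRM(3, reg, rm) builds a
 * mod/reg/rm byte with mod=11b (register-direct); e.g. for "mov eax, ebx" the emitted pair is
 * 0x89 followed by MAKE_MODRM(3, DISGREG_EBX, DISGREG_EAX) = (3 << 6) | (3 << 3) | 0 = 0xD8.
 * The rel32 operand of the trailing jmp is the return address minus the address of the byte
 * following the 4-byte displacement field.  A minimal self-checking sketch of both
 * calculations, assuming nothing beyond standard C (names prefixed "ex" are hypothetical):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <assert.h>

# define EX_MAKE_MODRM(a_Mod, a_Reg, a_Rm)  ((uint8_t)(((a_Mod) << 6) | ((a_Reg) << 3) | (a_Rm)))

/** Computes the rel32 displacement for a jmp whose displacement field lives at uDispFieldAddr. */
static uint32_t exCalcJmpRel32(uint32_t uTargetAddr, uint32_t uDispFieldAddr)
{
    return uTargetAddr - (uDispFieldAddr + 4 /* size of the displacement field itself */);
}

static void exSelfTest(void)
{
    assert(EX_MAKE_MODRM(3, 3 /*ebx*/, 0 /*eax*/) == 0xD8);            /* mov eax, ebx => 89 D8 */
    assert(exCalcJmpRel32(0x00001000, 0x00002001) == (uint32_t)-0x1005);
}
#endif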
2620
2621
2622/**
2623 * Attempts to patch TPR MMIO instructions.
2624 *
2625 * @returns VBox status code.
2626 * @param pVM The cross context VM structure.
2627 * @param pVCpu The cross context virtual CPU structure.
2628 */
2629VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
2630{
2631 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2632 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2633 (void *)(uintptr_t)pVCpu->idCpu);
2634 AssertRC(rc);
2635 return rc;
2636}
2637
2638
2639/**
2640 * Checks if we need to reschedule due to VMM device heap changes.
2641 *
2642 * @returns true if a reschedule is required, otherwise false.
2643 * @param pVM The cross context VM structure.
2644 * @param pCtx VM execution context.
2645 */
2646VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx)
2647{
2648 /*
2649 * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
2650 * when the unrestricted guest execution feature is missing (VT-x only).
2651 */
2652 if ( pVM->hm.s.vmx.fEnabled
2653 && !pVM->hm.s.vmx.fUnrestrictedGuestCfg
2654 && CPUMIsGuestInRealModeEx(pCtx)
2655 && !PDMVmmDevHeapIsEnabled(pVM))
2656 return true;
2657
2658 return false;
2659}
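/*
 * Editor's note (illustrative, not part of the original source): a caller in the execution
 * loop would typically use this predicate to fall back to software emulation while the VMM
 * device heap is unavailable.  A hedged sketch, where the EXAMPLE_ENGINE_* values and the
 * surrounding loop are assumptions rather than the real EM code:
 *
 * @code
 *     // Re-evaluate the execution engine before re-entering the guest.
 *     if (HMR3IsRescheduleRequired(pVM, &pVCpu->cpum.GstCtx))
 *         enmEngine = EXAMPLE_ENGINE_EMULATE;   // emulate until the device heap is usable again
 *     else
 *         enmEngine = EXAMPLE_ENGINE_HM;        // resume hardware-assisted execution
 * @endcode
 */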
2660
2661
2662/**
2663 * Notification callback from DBGF when interrupt breakpoints or generic debug
2664 * event settings change.
2665 *
2666 * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards; this
2667 * function only updates the VM globals.
2668 *
2669 * @param pVM The cross context VM structure.
2670 * @thread EMT(0)
2671 */
2672VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
2673{
2674 /* Interrupts. */
2675 bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
2676 || pVM->dbgf.ro.cHardIntBreakpoints > 0;
2677
2678 /* CPU Exceptions. */
2679 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
2680 !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
2681 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2682 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2683
2684 /* Common VM exits. */
2685 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
2686 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
2687 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2688 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2689
2690 /* Vendor specific VM exits. */
2691 if (HMR3IsVmxEnabled(pVM->pUVM))
2692 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
2693 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
2694 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2695 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2696 else
2697 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
2698 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
2699 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2700 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2701
2702 /* Done. */
2703 pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
2704}
2705
2706
2707/**
2708 * Follow-up notification callback to HMR3NotifyDebugEventChanged for each CPU.
2709 *
2710 * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
2711 * the per-CPU settings.
2712 *
2713 * @param pVM The cross context VM structure.
2714 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2715 */
2716VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
2717{
2718 pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
2719}
2720
2721
2722/**
2723 * Checks if we are currently using hardware acceleration.
2724 *
2725 * @returns true if hardware acceleration is being used, otherwise false.
2726 * @param pVCpu The cross context virtual CPU structure.
2727 */
2728VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu)
2729{
2730 return pVCpu->hm.s.fActive;
2731}
2732
2733
2734/**
2735 * External interface for querying whether hardware acceleration is enabled.
2736 *
2737 * @returns true if VT-x or AMD-V is being used, otherwise false.
2738 * @param pUVM The user mode VM handle.
2739 * @sa HMIsEnabled, HMIsEnabledNotMacro.
2740 */
2741VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
2742{
2743 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2744 PVM pVM = pUVM->pVM;
2745 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2746 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
2747}
2748
2749
2750/**
2751 * External interface for querying whether VT-x is being used.
2752 *
2753 * @returns true if VT-x is being used, otherwise false.
2754 * @param pUVM The user mode VM handle.
2755 * @sa HMR3IsSvmEnabled, HMIsEnabled
2756 */
2757VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
2758{
2759 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2760 PVM pVM = pUVM->pVM;
2761 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2762 return pVM->hm.s.vmx.fEnabled
2763 && pVM->hm.s.vmx.fSupported
2764 && pVM->fHMEnabled;
2765}
2766
2767
2768/**
2769 * External interface for querying whether AMD-V is being used.
2770 *
2771 * @returns true if AMD-V is being used, otherwise false.
2772 * @param pUVM The user mode VM handle.
2773 * @sa HMR3IsVmxEnabled, HMIsEnabled
2774 */
2775VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
2776{
2777 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2778 PVM pVM = pUVM->pVM;
2779 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2780 return pVM->hm.s.svm.fEnabled
2781 && pVM->hm.s.svm.fSupported
2782 && pVM->fHMEnabled;
2783}
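/*
 * Editor's note (illustrative, not part of the original source): the PUVM-based queries above
 * form the external interface a frontend can use, e.g. when logging which acceleration mode a
 * VM ended up with.  A minimal sketch using only the functions defined in this file:
 *
 * @code
 *     if (HMR3IsEnabled(pUVM))
 *         LogRel(("Example: using %s\n", HMR3IsVmxEnabled(pUVM) ? "VT-x" : "AMD-V"));
 *     else
 *         LogRel(("Example: hardware-assisted execution not in use\n"));
 * @endcode
 */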
2784
2785
2786/**
2787 * Checks if we are currently using nested paging.
2788 *
2789 * @returns true if nested paging is being used, otherwise false.
2790 * @param pUVM The user mode VM handle.
2791 */
2792VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
2793{
2794 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2795 PVM pVM = pUVM->pVM;
2796 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2797 return pVM->hm.s.fNestedPagingCfg;
2798}
2799
2800
2801/**
2802 * Checks if virtualized APIC registers are enabled.
2803 *
2804 * When enabled, this feature allows the CPU to satisfy most guest accesses to
2805 * APIC registers in the virtual-APIC page without causing VM-exits. See
2806 * Intel spec. 29.1.1 "Virtualized APIC Registers".
2807 *
2808 * @returns true if virtualized APIC registers are enabled, otherwise
2809 * false.
2810 * @param pUVM The user mode VM handle.
2811 */
2812VMMR3DECL(bool) HMR3AreVirtApicRegsEnabled(PUVM pUVM)
2813{
2814 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2815 PVM pVM = pUVM->pVM;
2816 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2817 return pVM->hm.s.fVirtApicRegs;
2818}
2819
2820
2821/**
2822 * Checks if APIC posted-interrupt processing is enabled.
2823 *
2824 * This returns whether interrupts can be delivered to the guest by updating
2825 * APIC state from host-context, i.e. without forcing the guest to leave guest-context.
2826 *
2827 * @returns true if APIC posted-interrupt processing is enabled,
2828 * otherwise false.
2829 * @param pUVM The user mode VM handle.
2830 */
2831VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM)
2832{
2833 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2834 PVM pVM = pUVM->pVM;
2835 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2836 return pVM->hm.s.fPostedIntrs;
2837}
2838
2839
2840/**
2841 * Checks if we are currently using VPID in VT-x mode.
2842 *
2843 * @returns true if VPID is being used, otherwise false.
2844 * @param pUVM The user mode VM handle.
2845 */
2846VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
2847{
2848 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2849 PVM pVM = pUVM->pVM;
2850 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2851 return pVM->hm.s.ForR3.vmx.fVpid;
2852}
2853
2854
2855/**
2856 * Checks if we are currently using VT-x unrestricted execution,
2857 * aka UX.
2858 *
2859 * @returns true if UX is being used, otherwise false.
2860 * @param pUVM The user mode VM handle.
2861 */
2862VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
2863{
2864 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2865 PVM pVM = pUVM->pVM;
2866 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2867 return pVM->hm.s.vmx.fUnrestrictedGuestCfg
2868 || pVM->hm.s.svm.fSupported;
2869}
2870
2871
2872/**
2873 * Checks if the VMX-preemption timer is being used.
2874 *
2875 * @returns true if the VMX-preemption timer is being used, otherwise false.
2876 * @param pVM The cross context VM structure.
2877 */
2878VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2879{
2880 return HMIsEnabled(pVM)
2881 && pVM->hm.s.vmx.fEnabled
2882 && pVM->hm.s.vmx.fUsePreemptTimerCfg;
2883}
2884
2885
2886#ifdef TODO_9217_VMCSINFO
2887/**
2888 * Helper for HMR3CheckError to log VMCS controls to the release log.
2889 *
2890 * @param idCpu The Virtual CPU ID.
2891 * @param pVmcsInfo The VMCS info. object.
2892 */
2893static void hmR3CheckErrorLogVmcsCtls(VMCPUID idCpu, PCVMXVMCSINFO pVmcsInfo)
2894{
2895 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", idCpu, pVmcsInfo->u32PinCtls));
2896 {
2897 uint32_t const u32Val = pVmcsInfo->u32PinCtls;
2898 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
2899 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
2900 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_VIRT_NMI );
2901 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_PREEMPT_TIMER);
2902 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
2903 }
2904 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls));
2905 {
2906 uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
2907 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
2908 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
2909 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_HLT_EXIT );
2910 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INVLPG_EXIT );
2911 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MWAIT_EXIT );
2912 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDPMC_EXIT );
2913 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDTSC_EXIT );
2914 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_LOAD_EXIT );
2915 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_STORE_EXIT );
2916 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TERTIARY_CTLS );
2917 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_LOAD_EXIT );
2918 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_STORE_EXIT );
2919 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TPR_SHADOW );
2920 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_NMI_WINDOW_EXIT );
2921 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MOV_DR_EXIT );
2922 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_UNCOND_IO_EXIT );
2923 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_IO_BITMAPS );
2924 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_TRAP_FLAG );
2925 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_MSR_BITMAPS );
2926 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_EXIT );
2927 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_PAUSE_EXIT );
2928 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2929 }
2930 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls2));
2931 {
2932 uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
2933 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
2934 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
2935 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_DESC_TABLE_EXIT );
2936 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDTSCP );
2937 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_X2APIC_MODE );
2938 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VPID );
2939 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_WBINVD_EXIT );
2940 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_UNRESTRICTED_GUEST );
2941 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_APIC_REG_VIRT );
2942 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_INT_DELIVERY );
2943 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT );
2944 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDRAND_EXIT );
2945 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_INVPCID );
2946 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMFUNC );
2947 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMCS_SHADOWING );
2948 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLS_EXIT );
2949 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDSEED_EXIT );
2950 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PML );
2951 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT_XCPT_VE );
2952 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
2953 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_XSAVES_XRSTORS );
2954 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
2955 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_SPP_EPT );
2956 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PT_EPT );
2957 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
2958 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_USER_WAIT_PAUSE );
2959 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLV_EXIT );
2960 }
2961 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", idCpu, pVmcsInfo->u32EntryCtls));
2962 {
2963 uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
2964 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
2965 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
2966 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_ENTRY_TO_SMM );
2967 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
2968 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PERF_MSR );
2969 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PAT_MSR );
2970 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
2971 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR );
2972 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
2973 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR );
2974 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_CET_STATE );
2975 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PKRS_MSR );
2976 }
2977 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", idCpu, pVmcsInfo->u32ExitCtls));
2978 {
2979 uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
2980 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
2981 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
2982 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PERF_MSR );
2983 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_ACK_EXT_INT );
2984 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PAT_MSR );
2985 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PAT_MSR );
2986 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_EFER_MSR );
2987 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_EFER_MSR );
2988 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER );
2989 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR );
2990 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT );
2991 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR );
2992 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_CET_STATE );
2993 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PKRS_MSR );
2994 }
2995}
2996#endif
2997
2998
2999/**
3000 * Checks a fatal VT-x/AMD-V error and produces a meaningful
3001 * release log message.
3002 *
3003 * @param pVM The cross context VM structure.
3004 * @param iStatusCode VBox status code.
3005 */
3006VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
3007{
3008 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3009 {
3010 /** @todo r=ramshankar: Are all EMTs out of ring-0 at this point!? If not, we
3011 * might be getting inaccurate values for non-guru'ing EMTs. */
3012 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3013#ifdef TODO_9217_VMCSINFO
3014 PCVMXVMCSINFOSHARED pVmcsInfo = hmGetVmxActiveVmcsInfoShared(pVCpu);
3015#endif
3016 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
3017 switch (iStatusCode)
3018 {
3019 case VERR_VMX_INVALID_VMCS_PTR:
3020 {
3021 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
3022 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
3023#ifdef TODO_9217_VMCSINFO
3024 LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", idCpu, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
3025 pVmcsInfo->HCPhysVmcs));
3026#endif
3027 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
3028 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3029 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3030 break;
3031 }
3032
3033 case VERR_VMX_UNABLE_TO_START_VM:
3034 {
3035 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
3036 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
3037 LogRel(("HM: CPU[%u] Instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
3038 LogRel(("HM: CPU[%u] Exit reason %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32ExitReason));
3039
3040 if ( pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
3041 || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
3042 {
3043 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3044 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3045 }
3046 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
3047 {
3048#ifdef TODO_9217_VMCSINFO
3049 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3050 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
3051 LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrLoad));
3052 LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrStore));
3053 LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysHostMsrLoad));
3054 LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", idCpu, pVmcsInfo->cEntryMsrLoad));
3055 LogRel(("HM: CPU[%u] cExitMsrStore %u\n", idCpu, pVmcsInfo->cExitMsrStore));
3056 LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", idCpu, pVmcsInfo->cExitMsrLoad));
3057#endif
3058 }
3059 /** @todo Log VM-entry event injection control fields
3060 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
3061 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
3062 break;
3063 }
3064
3065 case VERR_VMX_INVALID_GUEST_STATE:
3066 {
3067 LogRel(("HM: VERR_VMX_INVALID_GUEST_STATE:\n"));
3068 LogRel(("HM: CPU[%u] HM error = %#RX32\n", idCpu, pVCpu->hm.s.u32HMError));
3069 LogRel(("HM: CPU[%u] Guest-intr. state = %#RX32\n", idCpu, pVCpu->hm.s.vmx.LastError.u32GuestIntrState));
3070#ifdef TODO_9217_VMCSINFO
3071 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3072#endif
3073 break;
3074 }
3075
3076 /* The guru will dump the HM error and exit history. Nothing extra to report for these errors. */
3077 case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
3078 case VERR_VMX_INVALID_VMXON_PTR:
3079 case VERR_VMX_UNEXPECTED_EXIT:
3080 case VERR_VMX_INVALID_VMCS_FIELD:
3081 case VERR_SVM_UNKNOWN_EXIT:
3082 case VERR_SVM_UNEXPECTED_EXIT:
3083 case VERR_SVM_UNEXPECTED_PATCH_TYPE:
3084 case VERR_SVM_UNEXPECTED_XCPT_EXIT:
3085 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
3086 break;
3087 }
3088 }
3089
3090 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
3091 {
3092 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed1));
3093 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed0));
3094 }
3095 else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
3096 LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError));
3097}
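/*
 * Editor's note (illustrative, not part of the original source): the "allowed-1" and
 * "allowed-0" values logged above come from the VMX capability MSRs.  A control dword is
 * acceptable to the CPU only if every allowed-0 bit is set and no bit outside the allowed-1
 * mask is set (Intel SDM Vol. 3, Appendix A).  A sketch of that check, with a hypothetical
 * helper name:
 *
 * @code
 *     static bool exampleIsVmxCtlValid(uint32_t fCtl, uint32_t fAllowed0, uint32_t fAllowed1)
 *     {
 *         return (fCtl & fAllowed0) == fAllowed0   // all mandatory-1 bits are present
 *             && (fCtl & ~fAllowed1) == 0;         // no bit outside the allowed-1 mask is set
 *     }
 * @endcode
 */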
3098
3099
3100/**
3101 * Execute state save operation.
3102 *
3103 * Save only data that cannot be re-loaded while entering HM ring-0 code. This
3104 * is because we always save the VM state from ring-3, and thus most HM state
3105 * will be re-synced dynamically at runtime and doesn't need to be part of the VM
3106 * saved state.
3107 *
3108 * @returns VBox status code.
3109 * @param pVM The cross context VM structure.
3110 * @param pSSM SSM operation handle.
3111 */
3112static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3113{
3114 Log(("hmR3Save:\n"));
3115
3116 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3117 {
3118 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3119 Assert(!pVCpu->hm.s.Event.fPending);
3120 if (pVM->cpum.ro.GuestFeatures.fSvm)
3121 {
3122 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3123 SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
3124 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
3125 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
3126 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
3127 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
3128 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
3129 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
3130 SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
3131 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
3132 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
3133 SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
3134 SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
3135 SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
3136 }
3137 }
3138
3139 /* Save the guest patch data. */
3140 SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3141 SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3142 SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3143
3144 /* Store all the guest patch records too. */
3145 int rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3146 if (RT_FAILURE(rc))
3147 return rc;
3148
3149 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3150 {
3151 AssertCompileSize(HMTPRINSTR, 4);
3152 PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3153 SSMR3PutU32(pSSM, pPatch->Core.Key);
3154 SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3155 SSMR3PutU32(pSSM, pPatch->cbOp);
3156 SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3157 SSMR3PutU32(pSSM, pPatch->cbNewOp);
3158 SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
3159 SSMR3PutU32(pSSM, pPatch->uSrcOperand);
3160 SSMR3PutU32(pSSM, pPatch->uDstOperand);
3161 SSMR3PutU32(pSSM, pPatch->pJumpTarget);
3162 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
3163 if (RT_FAILURE(rc))
3164 return rc;
3165 }
3166
3167 return VINF_SUCCESS;
3168}
3169
3170
3171/**
3172 * Execute state load operation.
3173 *
3174 * @returns VBox status code.
3175 * @param pVM The cross context VM structure.
3176 * @param pSSM SSM operation handle.
3177 * @param uVersion Data layout version.
3178 * @param uPass The data pass.
3179 */
3180static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3181{
3182 int rc;
3183
3184 LogFlowFunc(("uVersion=%u\n", uVersion));
3185 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
3186
3187 /*
3188 * Validate version.
3189 */
3190 if ( uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
3191 && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
3192 && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
3193 && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
3194 {
3195 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
3196 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3197 }
3198
3199 /*
3200 * Load per-VCPU state.
3201 */
3202 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3203 {
3204 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3205 if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
3206 {
3207 /* Load the SVM nested hw.virt state if the VM is configured for it. */
3208 if (pVM->cpum.ro.GuestFeatures.fSvm)
3209 {
3210 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3211 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
3212 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
3213 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
3214 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
3215 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
3216 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
3217 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
3218 SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
3219 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
3220 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
3221 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
3222 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
3223 rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
3224 AssertRCReturn(rc, rc);
3225 }
3226 }
3227 else
3228 {
3229 /* Pending HM event (obsolete for a long time since TRPM holds the info.) */
3230 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.fPending);
3231 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.u32ErrCode);
3232 SSMR3GetU64(pSSM, &pVCpu->hm.s.Event.u64IntInfo);
3233
3234 /* VMX fWasInRealMode related data. */
3235 uint32_t uDummy;
3236 SSMR3GetU32(pSSM, &uDummy);
3237 SSMR3GetU32(pSSM, &uDummy);
3238 rc = SSMR3GetU32(pSSM, &uDummy);
3239 AssertRCReturn(rc, rc);
3240 }
3241 }
3242
3243 /*
3244 * Load TPR patching data.
3245 */
3246 if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
3247 {
3248 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3249 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3250 SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3251
3252 /* Fetch all TPR patch records. */
3253 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3254 AssertRCReturn(rc, rc);
3255 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3256 {
3257 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3258 SSMR3GetU32(pSSM, &pPatch->Core.Key);
3259 SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3260 SSMR3GetU32(pSSM, &pPatch->cbOp);
3261 SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3262 SSMR3GetU32(pSSM, &pPatch->cbNewOp);
3263 SSM_GET_ENUM32_RET(pSSM, pPatch->enmType, HMTPRINSTR);
3264
3265 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
3266 pVM->hm.s.fTprPatchingActive = true;
3267 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTprPatchingActive == false);
3268
3269 SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
3270 SSMR3GetU32(pSSM, &pPatch->uDstOperand);
3271 SSMR3GetU32(pSSM, &pPatch->cFaults);
3272 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
3273 AssertRCReturn(rc, rc);
3274
3275 LogFlow(("hmR3Load: patch %d\n", i));
3276 LogFlow(("Key = %x\n", pPatch->Core.Key));
3277 LogFlow(("cbOp = %d\n", pPatch->cbOp));
3278 LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
3279 LogFlow(("type = %d\n", pPatch->enmType));
3280 LogFlow(("srcop = %d\n", pPatch->uSrcOperand));
3281 LogFlow(("dstop = %d\n", pPatch->uDstOperand));
3282 LogFlow(("cFaults = %d\n", pPatch->cFaults));
3283 LogFlow(("target = %x\n", pPatch->pJumpTarget));
3284
3285 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
3286 AssertRCReturn(rc, rc);
3287 }
3288 }
3289
3290 return VINF_SUCCESS;
3291}
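/*
 * Editor's note (illustrative, not part of the original source): hmR3Save and hmR3Load are SSM
 * unit callbacks; their registration normally happens once during HM initialization.  A hedged
 * sketch of what such a registration typically looks like (the unit name, size hint and exact
 * argument list are assumptions and may differ from the real HMR3Init code):
 *
 * @code
 *     rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
 *                                NULL,                   // pszBefore
 *                                NULL, NULL, NULL,       // live-save prep/exec/vote (unused)
 *                                NULL, hmR3Save, NULL,   // save prep/exec/done
 *                                NULL, hmR3Load, NULL);  // load prep/exec/done
 * @endcode
 */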
3292
3293
3294/**
3295 * Displays HM info.
3296 *
3297 * @param pVM The cross context VM structure.
3298 * @param pHlp The info helper functions.
3299 * @param pszArgs Arguments, ignored.
3300 */
3301static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3302{
3303 NOREF(pszArgs);
3304 PVMCPU pVCpu = VMMGetCpu(pVM);
3305 if (!pVCpu)
3306 pVCpu = pVM->apCpusR3[0];
3307
3308 if (HMIsEnabled(pVM))
3309 {
3310 if (pVM->hm.s.vmx.fSupported)
3311 pHlp->pfnPrintf(pHlp, "CPU[%u]: VT-x info:\n", pVCpu->idCpu);
3312 else
3313 pHlp->pfnPrintf(pHlp, "CPU[%u]: AMD-V info:\n", pVCpu->idCpu);
3314 pHlp->pfnPrintf(pHlp, " HM error = %#x (%u)\n", pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError);
3315 pHlp->pfnPrintf(pHlp, " rcLastExitToR3 = %Rrc\n", pVCpu->hm.s.rcLastExitToR3);
3316 if (pVM->hm.s.vmx.fSupported)
3317 {
3318 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3319 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3320 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
3321
3322 pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
3323 pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active);
3324 if (fRealOnV86Active)
3325 {
3326 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfoShared->RealMode.Eflags.u32);
3327 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfoShared->RealMode.AttrCS.u);
3328 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfoShared->RealMode.AttrSS.u);
3329 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfoShared->RealMode.AttrDS.u);
3330 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfoShared->RealMode.AttrES.u);
3331 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfoShared->RealMode.AttrFS.u);
3332 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfoShared->RealMode.AttrGS.u);
3333 }
3334 }
3335 }
3336 else
3337 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3338}
3339
3340
3341/**
3342 * Displays the HM Last-Branch-Record (LBR) info for the guest.
3343 *
3344 * @param pVM The cross context VM structure.
3345 * @param pHlp The info helper functions.
3346 * @param pszArgs Arguments, ignored.
3347 */
3348static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3349{
3350 NOREF(pszArgs);
3351 PVMCPU pVCpu = VMMGetCpu(pVM);
3352 if (!pVCpu)
3353 pVCpu = pVM->apCpusR3[0];
3354
3355 if (!HMIsEnabled(pVM))
3356 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3357 else if (HMIsVmxActive(pVM))
3358 {
3359 if (pVM->hm.s.vmx.fLbrCfg)
3360 {
3361 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3362 uint32_t const cLbrStack = pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast - pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst + 1;
3363
3364 /** @todo r=ramshankar: The index technically varies depending on the CPU, but
3365 * 0xf should cover everything we support thus far. Fix if necessary
3366 * later. */
3367 uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
3368 if (idxTopOfStack >= cLbrStack)
3369 {
3370 pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
3371 idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
3372 return;
3373 }
3374
3375 /*
3376 * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
3377 */
3378 pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
3379 uint32_t idxCurrent = idxTopOfStack;
3380 Assert(idxTopOfStack < cLbrStack);
3381 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
3382 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
3383 for (;;)
3384 {
3385 if (pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst)
3386 pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
3387 pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
3388 else
3389 pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
3390
3391 idxCurrent = (idxCurrent - 1) % cLbrStack;
3392 if (idxCurrent == idxTopOfStack)
3393 break;
3394 }
3395 }
3396 else
3397 pHlp->pfnPrintf(pHlp, "VM not configured to record LBRs for the guest\n");
3398 }
3399 else
3400 {
3401 Assert(HMIsSvmActive(pVM));
3402 /** @todo SVM: LBRs (get them from VMCB if possible). */
3403 pHlp->pfnPrintf(pHlp, "SVM LBR not implemented.\n");
3404 }
3405}
3406
3407
3408/**
3409 * Displays the HM pending event.
3410 *
3411 * @param pVM The cross context VM structure.
3412 * @param pHlp The info helper functions.
3413 * @param pszArgs Arguments, ignored.
3414 */
3415static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3416{
3417 NOREF(pszArgs);
3418 PVMCPU pVCpu = VMMGetCpu(pVM);
3419 if (!pVCpu)
3420 pVCpu = pVM->apCpusR3[0];
3421
3422 if (HMIsEnabled(pVM))
3423 {
3424 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM event (fPending=%RTbool)\n", pVCpu->idCpu, pVCpu->hm.s.Event.fPending);
3425 if (pVCpu->hm.s.Event.fPending)
3426 {
3427 pHlp->pfnPrintf(pHlp, " u64IntInfo = %#RX64\n", pVCpu->hm.s.Event.u64IntInfo);
3428 pHlp->pfnPrintf(pHlp, " u32ErrCode = %#RX32\n", pVCpu->hm.s.Event.u32ErrCode);
3429 pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", pVCpu->hm.s.Event.cbInstr);
3430 pHlp->pfnPrintf(pHlp, " GCPtrFaultAddress = %#RGp\n", pVCpu->hm.s.Event.GCPtrFaultAddress);
3431 }
3432 }
3433 else
3434 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3435}
3436
3437
3438/**
3439 * Displays the SVM nested-guest VMCB cache.
3440 *
3441 * @param pVM The cross context VM structure.
3442 * @param pHlp The info helper functions.
3443 * @param pszArgs Arguments, ignored.
3444 */
3445static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3446{
3447 NOREF(pszArgs);
3448 PVMCPU pVCpu = VMMGetCpu(pVM);
3449 if (!pVCpu)
3450 pVCpu = pVM->apCpusR3[0];
3451
3452 bool const fSvmEnabled = HMR3IsSvmEnabled(pVM->pUVM);
3453 if ( fSvmEnabled
3454 && pVM->cpum.ro.GuestFeatures.fSvm)
3455 {
3456 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3457 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
3458 pHlp->pfnPrintf(pHlp, " fCacheValid = %#RTbool\n", pVmcbNstGstCache->fCacheValid);
3459 pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx);
3460 pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx);
3461 pHlp->pfnPrintf(pHlp, " u16InterceptRdDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdDRx);
3462 pHlp->pfnPrintf(pHlp, " u16InterceptWrDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrDRx);
3463 pHlp->pfnPrintf(pHlp, " u16PauseFilterThreshold = %#RX16\n", pVmcbNstGstCache->u16PauseFilterThreshold);
3464 pHlp->pfnPrintf(pHlp, " u16PauseFilterCount = %#RX16\n", pVmcbNstGstCache->u16PauseFilterCount);
3465 pHlp->pfnPrintf(pHlp, " u32InterceptXcpt = %#RX32\n", pVmcbNstGstCache->u32InterceptXcpt);
3466 pHlp->pfnPrintf(pHlp, " u64InterceptCtrl = %#RX64\n", pVmcbNstGstCache->u64InterceptCtrl);
3467 pHlp->pfnPrintf(pHlp, " u64TSCOffset = %#RX64\n", pVmcbNstGstCache->u64TSCOffset);
3468 pHlp->pfnPrintf(pHlp, " fVIntrMasking = %RTbool\n", pVmcbNstGstCache->fVIntrMasking);
3469 pHlp->pfnPrintf(pHlp, " fNestedPaging = %RTbool\n", pVmcbNstGstCache->fNestedPaging);
3470 pHlp->pfnPrintf(pHlp, " fLbrVirt = %RTbool\n", pVmcbNstGstCache->fLbrVirt);
3471 }
3472 else
3473 {
3474 if (!fSvmEnabled)
3475 pHlp->pfnPrintf(pHlp, "HM SVM is not enabled for this VM!\n");
3476 else
3477 pHlp->pfnPrintf(pHlp, "SVM feature is not exposed to the guest!\n");
3478 }
3479}
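/*
 * Editor's note (illustrative, not part of the original source): the hmR3Info* handlers above
 * are DBGF info callbacks, normally registered during HM initialization so they can be invoked
 * from the debugger console.  A hedged sketch of such a registration (the info names and
 * description strings are assumptions and may differ from the real HMR3Init code):
 *
 * @code
 *     DBGFR3InfoRegisterInternalEx(pVM, "hm",             "Dumps HM info.",              hmR3Info,             DBGFINFO_FLAGS_ALL_EMTS);
 *     DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending, DBGFINFO_FLAGS_ALL_EMTS);
 *     DBGFR3InfoRegisterInternalEx(pVM, "lbr",            "Dumps the HM LBR info.",      hmR3InfoLbr,          DBGFINFO_FLAGS_ALL_EMTS);
 *     DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache",   "Dumps the HM SVM nested-guest VMCB cache.",
 *                                  hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
 * @endcode
 */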
3480