VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@92780

Last change on this file since 92780 was 92703, checked in by vboxsync, 3 years ago

VMM: Trying to cope without the support driver... bugref:10138

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 155.7 KB
/* $Id: HM.cpp 92703 2021-12-02 12:45:58Z vboxsync $ */
/** @file
 * HM - Intel/AMD VM Hardware Support Manager.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_hm HM - Hardware Assisted Virtualization Manager
 *
 * The HM manages guest execution using the VT-x and AMD-V CPU hardware
 * extensions.
 *
 * {summary of what HM does}
 *
 * Hardware assisted virtualization manager was originally abbreviated HWACCM,
 * however that was cumbersome to write and parse for such a central component,
 * so it was shortened to HM when refactoring the code in the 4.3 development
 * cycle.
 *
 * {add sections with more details}
 *
 * @sa @ref grp_hm
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include "HMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/env.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def HMVMX_REPORT_FEAT
 * Reports VT-x feature to the release log.
 *
 * @param   a_uAllowed1     Mask of allowed-1 feature bits.
 * @param   a_uAllowed0     Mask of allowed-0 feature bits.
 * @param   a_StrDesc       The description string to report.
 * @param   a_Featflag      Mask of the feature to report.
 */
#define HMVMX_REPORT_FEAT(a_uAllowed1, a_uAllowed0, a_StrDesc, a_Featflag) \
    do { \
        if ((a_uAllowed1) & (a_Featflag)) \
        { \
            if ((a_uAllowed0) & (a_Featflag)) \
                LogRel(("HM: " a_StrDesc " (must be set)\n")); \
            else \
                LogRel(("HM: " a_StrDesc "\n")); \
        } \
        else \
            LogRel(("HM: " a_StrDesc " (must be cleared)\n")); \
    } while (0)
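/* Example usage, mirroring the pin-based controls reporting further down in
   this file (fAllowed1/fAllowed0 are the two halves of a VMX controls MSR):
       HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT); */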

/** @def HMVMX_REPORT_ALLOWED_FEAT
 * Reports an allowed VT-x feature to the release log.
 *
 * @param   a_uAllowed1     Mask of allowed-1 feature bits.
 * @param   a_StrDesc       The description string to report.
 * @param   a_FeatFlag      Mask of the feature to report.
 */
#define HMVMX_REPORT_ALLOWED_FEAT(a_uAllowed1, a_StrDesc, a_FeatFlag) \
    do { \
        if ((a_uAllowed1) & (a_FeatFlag)) \
            LogRel(("HM: " a_StrDesc "\n")); \
        else \
            LogRel(("HM: " a_StrDesc " not supported\n")); \
    } while (0)

/** @def HMVMX_REPORT_MSR_CAP
 * Reports MSR feature capability.
 *
 * @param   a_MsrCaps       Mask of MSR feature bits.
 * @param   a_StrDesc       The description string to report.
 * @param   a_fCap          Mask of the feature to report.
 */
#define HMVMX_REPORT_MSR_CAP(a_MsrCaps, a_StrDesc, a_fCap) \
    do { \
        if ((a_MsrCaps) & (a_fCap)) \
            LogRel(("HM: " a_StrDesc "\n")); \
    } while (0)

/** @def HMVMX_LOGREL_FEAT
 * Dumps a feature flag from a bitmap of features to the release log.
 *
 * @param   a_fVal          The value of all the features.
 * @param   a_fMask         The specific bitmask of the feature.
 */
#define HMVMX_LOGREL_FEAT(a_fVal, a_fMask) \
    do { \
        if ((a_fVal) & (a_fMask)) \
            LogRel(("HM: %s\n", #a_fMask)); \
    } while (0)
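/* Unlike the macros above, HMVMX_LOGREL_FEAT logs the *name* of the mask via
   the # stringization operator. An illustrative call (assuming fFeatures holds
   a VMX secondary processor-based controls value):
       HMVMX_LOGREL_FEAT(fFeatures, VMX_PROC_CTLS2_EPT);
   logs "HM: VMX_PROC_CTLS2_EPT" when the EPT bit is set. */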


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int)  hmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int)  hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static int                hmR3InitFinalizeR3(PVM pVM);
static int                hmR3InitFinalizeR0(PVM pVM);
static int                hmR3InitFinalizeR0Intel(PVM pVM);
static int                hmR3InitFinalizeR0Amd(PVM pVM);
static int                hmR3TermCPU(PVM pVM);


#ifdef VBOX_WITH_STATISTICS
/**
 * Returns the name of the hardware exception.
 *
 * @returns The name of the hardware exception.
 * @param   uVector     The exception vector.
 */
static const char *hmR3GetXcptName(uint8_t uVector)
{
    switch (uVector)
    {
        case X86_XCPT_DE:             return "#DE";
        case X86_XCPT_DB:             return "#DB";
        case X86_XCPT_NMI:            return "#NMI";
        case X86_XCPT_BP:             return "#BP";
        case X86_XCPT_OF:             return "#OF";
        case X86_XCPT_BR:             return "#BR";
        case X86_XCPT_UD:             return "#UD";
        case X86_XCPT_NM:             return "#NM";
        case X86_XCPT_DF:             return "#DF";
        case X86_XCPT_CO_SEG_OVERRUN: return "#CO_SEG_OVERRUN";
        case X86_XCPT_TS:             return "#TS";
        case X86_XCPT_NP:             return "#NP";
        case X86_XCPT_SS:             return "#SS";
        case X86_XCPT_GP:             return "#GP";
        case X86_XCPT_PF:             return "#PF";
        case X86_XCPT_MF:             return "#MF";
        case X86_XCPT_AC:             return "#AC";
        case X86_XCPT_MC:             return "#MC";
        case X86_XCPT_XF:             return "#XF";
        case X86_XCPT_VE:             return "#VE";
        case X86_XCPT_CP:             return "#CP";
        case X86_XCPT_VC:             return "#VC";
        case X86_XCPT_SX:             return "#SX";
    }
    return "Reserved";
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Initializes the HM.
 *
 * This is the very first component to really do init after CFGM so that we can
 * establish the predominant execution engine for the VM prior to initializing
 * other modules. It takes care of NEM initialization if needed (HM disabled or
 * not available in HW).
 *
 * If VT-x or AMD-V hardware isn't available, HM will try to fall back on a
 * native hypervisor API via NEM, and then on raw-mode if that isn't available
 * either. The fallback to raw-mode will not happen if /HM/HMForced is set
 * (as it is for guests using SMP or 64-bit mode, as well as for complicated
 * guests like OS X, OS/2 and others).
 *
 * Note that a lot of the setup work is done in ring-0 and thus postponed till
 * the ring-3 and ring-0 callbacks to HMR3InitCompleted.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks Be careful with what we call here, since most of the VMM components
 *          are uninitialized.
 */
VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
{
    LogFlowFunc(("\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, hm.s, 32);
    AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));

    /*
     * Register the saved state data unit.
     */
    int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
                                   NULL, NULL, NULL,
                                   NULL, hmR3Save, NULL,
                                   NULL, hmR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register info handlers.
     */
    rc = DBGFR3InfoRegisterInternalEx(pVM, "hm", "Dumps HM info.", hmR3Info, DBGFINFO_FLAGS_ALL_EMTS);
    AssertRCReturn(rc, rc);

    rc = DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending,
                                      DBGFINFO_FLAGS_ALL_EMTS);
    AssertRCReturn(rc, rc);

    rc = DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache", "Dumps the HM SVM nested-guest VMCB cache.",
                                      hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
    AssertRCReturn(rc, rc);

    rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the HM LBR info.", hmR3InfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
    AssertRCReturn(rc, rc);

    /*
     * Read configuration.
     */
    PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");

    /*
     * Validate the HM settings.
     */
    rc = CFGMR3ValidateConfig(pCfgHm, "/HM/",
                              "HMForced" /* implied 'true' these days */
                              "|UseNEMInstead"
                              "|FallbackToNEM"
                              "|EnableNestedPaging"
                              "|EnableUX"
                              "|EnableLargePages"
                              "|EnableVPID"
                              "|IBPBOnVMExit"
                              "|IBPBOnVMEntry"
                              "|SpecCtrlByHost"
                              "|L1DFlushOnSched"
                              "|L1DFlushOnVMEntry"
                              "|MDSClearOnSched"
                              "|MDSClearOnVMEntry"
                              "|TPRPatchingEnabled"
                              "|64bitEnabled"
                              "|Exclusive"
                              "|MaxResumeLoops"
                              "|VmxPleGap"
                              "|VmxPleWindow"
                              "|VmxLbr"
                              "|UseVmxPreemptTimer"
                              "|SvmPauseFilter"
                              "|SvmPauseFilterThreshold"
                              "|SvmVirtVmsaveVmload"
                              "|SvmVGif"
                              "|LovelyMesaDrvWorkaround",
                              "" /* pszValidNodes */, "HM" /* pszWho */, 0 /* uInstance */);
    if (RT_FAILURE(rc))
        return rc;
    /** @cfgm{/HM/HMForced, bool, false}
     * Forces hardware virtualization, no falling back on raw-mode. HM must be
     * enabled, i.e. /HMEnabled must be true. */
    bool fHMForced;
    AssertRelease(pVM->fHMEnabled);
    fHMForced = true;

    /** @cfgm{/HM/UseNEMInstead, bool, false}
     * Don't use HM, use NEM instead. */
    bool fUseNEMInstead = false;
    rc = CFGMR3QueryBoolDef(pCfgHm, "UseNEMInstead", &fUseNEMInstead, false);
    AssertRCReturn(rc, rc);
    if (fUseNEMInstead && pVM->fHMEnabled)
    {
        LogRel(("HM: Setting fHMEnabled to false because fUseNEMInstead is set.\n"));
        pVM->fHMEnabled = false;
    }

    /** @cfgm{/HM/FallbackToNEM, bool, true}
     * Enables fallback on NEM. */
    bool fFallbackToNEM = true;
    rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToNEM", &fFallbackToNEM, true);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/EnableNestedPaging, bool, false}
     * Enables nested paging (aka extended page tables). */
    bool fAllowNestedPaging = false;
    rc = CFGMR3QueryBoolDef(pCfgHm, "EnableNestedPaging", &fAllowNestedPaging, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/EnableUX, bool, true}
     * Enables the VT-x unrestricted execution feature. */
    bool fAllowUnrestricted = true;
    rc = CFGMR3QueryBoolDef(pCfgHm, "EnableUX", &fAllowUnrestricted, true);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/EnableLargePages, bool, false}
     * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
     * page table walking and maybe yielding a better TLB hit rate in some cases. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "EnableLargePages", &pVM->hm.s.fLargePages, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/EnableVPID, bool, false}
     * Enables the VT-x VPID feature. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
     * Enables TPR patching for 32-bit Windows guests with an IO-APIC. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
     * Enables AMD64 CPU features.
     * On 32-bit hosts this isn't the default and requires host CPU support; 64-bit
     * hosts already have the support. */
#ifdef VBOX_WITH_64_BITS_GUESTS
    rc = CFGMR3QueryBoolDef(pCfgHm, "64bitEnabled", &pVM->hm.s.fAllow64BitGuestsCfg, HC_ARCH_BITS == 64);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->hm.s.fAllow64BitGuestsCfg = false;
#endif

    /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
     * The pause-filter exiting gap in TSC ticks. When the number of ticks between
     * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
     * latest PAUSE instruction to be the start of a new PAUSE loop.
     */
    rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
     * The pause-filter exiting window in TSC ticks. When the number of ticks
     * between the current PAUSE instruction and the first PAUSE of a loop exceeds
     * VmxPleWindow, a VM-exit is triggered.
     *
     * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
     */
    rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/VmxLbr, bool, false}
     * Whether to enable LBR for the guest. This is disabled by default as it's only
     * useful while debugging and enabling it causes a noticeable performance hit. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "VmxLbr", &pVM->hm.s.vmx.fLbrCfg, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/SvmPauseFilter, uint16_t, 0}
     * A counter that is decremented each time a PAUSE instruction is executed by the
     * guest. When the counter is 0, a \#VMEXIT is triggered.
     *
     * Setting SvmPauseFilter to 0 disables pause-filter exiting.
     */
    rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilter", &pVM->hm.s.svm.cPauseFilter, 0);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
     * The pause filter threshold in ticks. When the elapsed time (in ticks) between
     * two successive PAUSE instructions exceeds SvmPauseFilterThreshold, the
     * PauseFilter count is reset to its initial value. However, if PAUSE is
     * executed PauseFilter times within PauseFilterThreshold ticks, a VM-exit will
     * be triggered.
     *
     * Requires SvmPauseFilter to be non-zero for the pause-filter threshold to be
     * activated.
     */
    rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/SvmVirtVmsaveVmload, bool, true}
     * Whether to make use of the virtualized VMSAVE/VMLOAD feature of the CPU if
     * it's available. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVirtVmsaveVmload", &pVM->hm.s.svm.fVirtVmsaveVmload, true);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/SvmVGif, bool, true}
     * Whether to make use of the Virtual GIF (Global Interrupt Flag) feature of the
     * CPU if it's available. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVGif", &pVM->hm.s.svm.fVGif, true);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/SvmLbrVirt, bool, false}
     * Whether to make use of the LBR virtualization feature of the CPU if it's
     * available. This is disabled by default as it's only useful while debugging
     * and enabling it causes a small hit to performance. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "SvmLbrVirt", &pVM->hm.s.svm.fLbrVirt, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/HM/Exclusive, bool}
     * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
     * global init for each host CPU. If false, we do local init each time we wish
     * to execute guest code.
     *
     * On Windows, default is false due to the higher risk of conflicts with other
     * hypervisors.
     *
     * On Mac OS X, this setting is ignored since the code does not handle local
     * init when it utilizes the OS provided VT-x function, SUPR0EnableVTx().
     */
#if defined(RT_OS_DARWIN)
    pVM->hm.s.fGlobalInit = true;
#else
    rc = CFGMR3QueryBoolDef(pCfgHm, "Exclusive", &pVM->hm.s.fGlobalInit,
# if defined(RT_OS_WINDOWS)
                            false
# else
                            true
# endif
                            );
    AssertLogRelRCReturn(rc, rc);
#endif

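    /* The /HM/* keys read above and below live under the VM's CFGM tree. From
       the outside they can typically be set through extradata, where the
       "VBoxInternal/" prefix maps onto the CFGM root (illustrative usage, with
       "My VM" as a placeholder VM name):
           VBoxManage setextradata "My VM" "VBoxInternal/HM/Exclusive" 0 */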
    /** @cfgm{/HM/MaxResumeLoops, uint32_t}
     * The number of times to resume guest execution before we forcibly return to
     * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
     * determines the default value. */
    rc = CFGMR3QueryU32Def(pCfgHm, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoopsCfg, 0 /* set by R0 later */);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/UseVmxPreemptTimer, bool}
     * Whether to make use of the VMX-preemption timer feature of the CPU if it's
     * available. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimerCfg, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/IBPBOnVMExit, bool}
     * Costly paranoia setting. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/IBPBOnVMEntry, bool}
     * Costly paranoia setting. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/L1DFlushOnSched, bool, true}
     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
    AssertLogRelRCReturn(rc, rc);

    /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
    if (pVM->hm.s.fL1dFlushOnVmEntry)
        pVM->hm.s.fL1dFlushOnSched = false;

    /** @cfgm{/HM/SpecCtrlByHost, bool}
     * Another expensive paranoia setting. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/MDSClearOnSched, bool, true}
     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
     * ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/HM/MDSClearOnVmEntry, bool, false}
     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
     * ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVmEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
    AssertLogRelRCReturn(rc, rc);

    /* Disable MDSClearOnSched if MDSClearOnVmEntry is enabled. */
    if (pVM->hm.s.fMdsClearOnVmEntry)
        pVM->hm.s.fMdsClearOnSched = false;

    /** @cfgm{/HM/LovelyMesaDrvWorkaround, bool}
     * Workaround for the Mesa vmsvga 3D driver making incorrect assumptions about
     * the hypervisor it is running under. */
    bool fMesaWorkaround;
    rc = CFGMR3QueryBoolDef(pCfgHm, "LovelyMesaDrvWorkaround", &fMesaWorkaround, false);
    AssertLogRelRCReturn(rc, rc);
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv = fMesaWorkaround;
    }
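    /* Background (a summary; the actual #GP handling lives in ring-0 code not
       shown in this file excerpt): Mesa's vmsvga driver probes for a VMware
       host using the VMware backdoor I/O port from ring-3, which normally
       raises #GP. With the workaround enabled, HM intercepts #GP for such
       guests and emulates just enough of the backdoor for the driver to work. */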

    /*
     * Check for VT-x or AMD-V support according to the user's wishes.
     */
    /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
     *        VERR_SVM_IN_USE. */
    if (pVM->fHMEnabled)
    {
        uint32_t fCaps;
        rc = SUPR3QueryVTCaps(&fCaps);
        if (RT_SUCCESS(rc))
        {
            if (fCaps & SUPVTCAPS_AMD_V)
            {
                pVM->hm.s.svm.fSupported = true;
                LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
                VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
            }
            else if (fCaps & SUPVTCAPS_VT_X)
            {
                const char *pszWhy;
                rc = SUPR3QueryVTxSupported(&pszWhy);
                if (RT_SUCCESS(rc))
                {
                    pVM->hm.s.vmx.fSupported = true;
                    LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
                            fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
                            fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST ? " and unrestricted guest execution" : "",
                            (fCaps & (SUPVTCAPS_NESTED_PAGING | SUPVTCAPS_VTX_UNRESTRICTED_GUEST)) ? " hw support" : ""));
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
                }
                else
                {
                    /*
                     * Before failing, try fallback to NEM if we're allowed to do that.
                     */
                    pVM->fHMEnabled = false;
                    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
                    if (fFallbackToNEM)
                    {
                        LogRel(("HM: HMR3Init: Attempting fall back to NEM: The host kernel does not support VT-x - %s\n", pszWhy));
                        int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);

                        ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
                        if (   RT_SUCCESS(rc2)
                            && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
                            rc = VINF_SUCCESS;
                    }
                    if (RT_FAILURE(rc))
                        return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x: %s\n", pszWhy);
                }
            }
            else
                AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
                                            VERR_INTERNAL_ERROR_5);

            /*
             * Enable nested paging and unrestricted guest execution now if they're
             * configured so that CPUM can make decisions based on our configuration.
             */
            if (   fAllowNestedPaging
                && (fCaps & SUPVTCAPS_NESTED_PAGING))
            {
                pVM->hm.s.fNestedPagingCfg = true;
                if (fCaps & SUPVTCAPS_VT_X)
                {
                    if (   fAllowUnrestricted
                        && (fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST))
                        pVM->hm.s.vmx.fUnrestrictedGuestCfg = true;
                    else
                        Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
                }
            }
            else
                Assert(!pVM->hm.s.fNestedPagingCfg);
        }
        else
        {
            const char *pszMsg;
            switch (rc)
            {
                case VERR_UNSUPPORTED_CPU:          pszMsg = "Unknown CPU, VT-x or AMD-V features cannot be ascertained"; break;
                case VERR_VMX_NO_VMX:               pszMsg = "VT-x is not available"; break;
                case VERR_VMX_MSR_VMX_DISABLED:     pszMsg = "VT-x is disabled in the BIOS"; break;
                case VERR_VMX_MSR_ALL_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS for all CPU modes"; break;
                case VERR_VMX_MSR_LOCKING_FAILED:   pszMsg = "Failed to enable and lock VT-x features"; break;
                case VERR_SVM_NO_SVM:               pszMsg = "AMD-V is not available"; break;
                case VERR_SVM_DISABLED:             pszMsg = "AMD-V is disabled in the BIOS (or by the host OS)"; break;
                case VERR_SUP_DRIVERLESS:           pszMsg = "Driverless mode"; break;
                default:
                    return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
            }

            /*
             * Before failing, try fallback to NEM if we're allowed to do that.
             */
            pVM->fHMEnabled = false;
            if (fFallbackToNEM)
            {
                LogRel(("HM: HMR3Init: Attempting fall back to NEM: %s\n", pszMsg));
                int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
                ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
                if (   RT_SUCCESS(rc2)
                    && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
                {
                    rc = VINF_SUCCESS;

                    /* For some reason, HM is in charge of large pages. Make sure to enable them: */
                    PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
                }
            }
            if (RT_FAILURE(rc))
                return VM_SET_ERROR(pVM, rc, pszMsg);
        }
    }
    else
    {
        /*
         * Disabled HM means raw-mode, unless NEM is supposed to be used.
         */
        if (fUseNEMInstead)
        {
            rc = NEMR3Init(pVM, false /*fFallback*/, true);
            ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
            if (RT_FAILURE(rc))
                return rc;

            /* For some reason, HM is in charge of large pages. Make sure to enable them: */
            PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
        }
        if (   pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET
            || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_RAW_MODE
            || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT /* paranoia */)
            return VM_SET_ERROR(pVM, rc, "Misconfigured VM: No guest execution engine available!");
    }

    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_RAW_MODE);
    return VINF_SUCCESS;
}


/**
 * Initializes HM components after ring-3 phase has been fully initialized.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int hmR3InitFinalizeR3(PVM pVM)
{
    LogFlowFunc(("\n"));

    if (!HMIsEnabled(pVM))
        return VINF_SUCCESS;

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        pVCpu->hm.s.fActive = false;
        pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); /* It is safe to call this now since GIMR3Init() has completed. */
    }

    /*
     * Check if L1D flush is needed/possible.
     */
    if (   !pVM->cpum.ro.HostFeatures.fFlushCmd
        || pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
        || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
        || pVM->cpum.ro.HostFeatures.fArchVmmNeedNotFlushL1d
        || pVM->cpum.ro.HostFeatures.fArchRdclNo)
        pVM->hm.s.fL1dFlushOnSched = pVM->hm.s.fL1dFlushOnVmEntry = false;

    /*
     * Check if MDS flush is needed/possible.
     * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
     */
    if (   !pVM->cpum.ro.HostFeatures.fMdsClear
        || pVM->cpum.ro.HostFeatures.fArchMdsNo)
        pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
    else if (   (   pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
                 && pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Atom_End)
             || (   pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
                 && pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Phi_End))
    {
        if (!pVM->hm.s.fMdsClearOnSched)
            pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
        pVM->hm.s.fMdsClearOnVmEntry = false;
    }
    else if (   pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
             || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
        pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;

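    /* In short: MDS clearing stays disabled on CPUs that lack MD_CLEAR or that
       advertise MDS_NO, is restricted to schedule-time clearing on the Atom
       (Airmont and later) and Knights families, and is likewise disabled
       outside the Nehalem-and-later Core range; everywhere else the user
       configuration stands, with VM-entry clearing overriding schedule-time
       clearing. */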
    /*
     * Statistics.
     */
#ifdef VBOX_WITH_STATISTICS
    STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
    STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
    STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessCr8, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessCR8", STAMUNIT_OCCURENCES, "Number of instruction replacements by MOV CR8.");
    STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessVmc, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessVMC", STAMUNIT_OCCURENCES, "Number of instruction replacements by VMMCALL.");
    STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
#endif

    bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu  = pVM->apCpusR3[idCpu];
        PHMCPU pHmCpu = &pVCpu->hm.s;
        int    rc;

# define HM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
                rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
                AssertRC(rc); \
            } while (0)
# define HM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
            HM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)

#ifdef VBOX_WITH_STATISTICS

        HM_REG_PROFILE(&pHmCpu->StatPoke, "/PROF/CPU%u/HM/Poke", "Profiling of RTMpPokeCpu.");
        HM_REG_PROFILE(&pHmCpu->StatSpinPoke, "/PROF/CPU%u/HM/PokeWait", "Profiling of poke wait.");
        HM_REG_PROFILE(&pHmCpu->StatSpinPokeFailed, "/PROF/CPU%u/HM/PokeWaitFailed", "Profiling of poke wait when RTMpPokeCpu fails.");
        HM_REG_PROFILE(&pHmCpu->StatEntry, "/PROF/CPU%u/HM/Entry", "Profiling of entry until entering GC.");
        HM_REG_PROFILE(&pHmCpu->StatPreExit, "/PROF/CPU%u/HM/SwitchFromGC_1", "Profiling of pre-exit processing after returning from GC.");
        HM_REG_PROFILE(&pHmCpu->StatExitHandling, "/PROF/CPU%u/HM/SwitchFromGC_2", "Profiling of exit handling (longjmps not included!)");
        HM_REG_PROFILE(&pHmCpu->StatExitIO, "/PROF/CPU%u/HM/SwitchFromGC_2/IO", "I/O.");
        HM_REG_PROFILE(&pHmCpu->StatExitMovCRx, "/PROF/CPU%u/HM/SwitchFromGC_2/MovCRx", "MOV CRx.");
        HM_REG_PROFILE(&pHmCpu->StatExitXcptNmi, "/PROF/CPU%u/HM/SwitchFromGC_2/XcptNmi", "Exceptions, NMIs.");
        HM_REG_PROFILE(&pHmCpu->StatExitVmentry, "/PROF/CPU%u/HM/SwitchFromGC_2/Vmentry", "VMLAUNCH/VMRESUME on Intel or VMRUN on AMD.");
        HM_REG_PROFILE(&pHmCpu->StatImportGuestState, "/PROF/CPU%u/HM/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
        HM_REG_PROFILE(&pHmCpu->StatExportGuestState, "/PROF/CPU%u/HM/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
        HM_REG_PROFILE(&pHmCpu->StatLoadGuestFpuState, "/PROF/CPU%u/HM/LoadGuestFpuState", "Profiling of CPUMR0LoadGuestFPU.");
        HM_REG_PROFILE(&pHmCpu->StatInGC, "/PROF/CPU%u/HM/InGC", "Profiling of execution of guest-code in hardware.");
# ifdef HM_PROFILE_EXIT_DISPATCH
        HM_REG_STAT(&pHmCpu->StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
                    "/PROF/CPU%u/HM/ExitDispatch", "Profiling the dispatching of exit handlers.");
# endif
#endif
# define HM_REG_COUNTER(a, b, desc) HM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)

#ifdef VBOX_WITH_STATISTICS
        HM_REG_COUNTER(&pHmCpu->StatExitAll, "/HM/CPU%u/Exit/All", "Total exits (including nested-guest exits).");
        HM_REG_COUNTER(&pHmCpu->StatNestedExitAll, "/HM/CPU%u/Exit/NestedGuest/All", "Total nested-guest exits.");
        HM_REG_COUNTER(&pHmCpu->StatExitShadowNM, "/HM/CPU%u/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestNM, "/HM/CPU%u/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitShadowPF, "/HM/CPU%u/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitShadowPFEM, "/HM/CPU%u/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestPF, "/HM/CPU%u/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestUD, "/HM/CPU%u/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestSS, "/HM/CPU%u/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestNP, "/HM/CPU%u/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestTS, "/HM/CPU%u/Exit/Trap/Gst/#TS", "Guest #TS (task switch) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestOF, "/HM/CPU%u/Exit/Trap/Gst/#OF", "Guest #OF (overflow) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestGP, "/HM/CPU%u/Exit/Trap/Gst/#GP", "Guest #GP (general protection) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestDE, "/HM/CPU%u/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestDF, "/HM/CPU%u/Exit/Trap/Gst/#DF", "Guest #DF (double fault) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestBR, "/HM/CPU%u/Exit/Trap/Gst/#BR", "Guest #BR (boundary range exceeded) exception.");
#endif
        HM_REG_COUNTER(&pHmCpu->StatExitGuestAC, "/HM/CPU%u/Exit/Trap/Gst/#AC", "Guest #AC (alignment check) exception.");
        if (fCpuSupportsVmx)
            HM_REG_COUNTER(&pHmCpu->StatExitGuestACSplitLock, "/HM/CPU%u/Exit/Trap/Gst/#AC-split-lock", "Guest triggered #AC due to split-lock being enabled on the host (interpreted).");
#ifdef VBOX_WITH_STATISTICS
        HM_REG_COUNTER(&pHmCpu->StatExitGuestDB, "/HM/CPU%u/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestMF, "/HM/CPU%u/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestBP, "/HM/CPU%u/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestXF, "/HM/CPU%u/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
        HM_REG_COUNTER(&pHmCpu->StatExitGuestXcpUnk, "/HM/CPU%u/Exit/Trap/Gst/Other", "Other guest exceptions.");
        HM_REG_COUNTER(&pHmCpu->StatExitRdmsr, "/HM/CPU%u/Exit/Instr/Rdmsr", "MSR read.");
        HM_REG_COUNTER(&pHmCpu->StatExitWrmsr, "/HM/CPU%u/Exit/Instr/Wrmsr", "MSR write.");
        HM_REG_COUNTER(&pHmCpu->StatExitDRxWrite, "/HM/CPU%u/Exit/Instr/DR-Write", "Debug register write.");
        HM_REG_COUNTER(&pHmCpu->StatExitDRxRead, "/HM/CPU%u/Exit/Instr/DR-Read", "Debug register read.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR0Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR2Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR3Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR4Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR8Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR0Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR2Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR3Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR4Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
        HM_REG_COUNTER(&pHmCpu->StatExitCR8Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
        HM_REG_COUNTER(&pHmCpu->StatExitClts, "/HM/CPU%u/Exit/Instr/CLTS", "CLTS instruction.");
        HM_REG_COUNTER(&pHmCpu->StatExitLmsw, "/HM/CPU%u/Exit/Instr/LMSW", "LMSW instruction.");
        HM_REG_COUNTER(&pHmCpu->StatExitXdtrAccess, "/HM/CPU%u/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
        HM_REG_COUNTER(&pHmCpu->StatExitIOWrite, "/HM/CPU%u/Exit/Instr/IO/Write", "I/O write.");
        HM_REG_COUNTER(&pHmCpu->StatExitIORead, "/HM/CPU%u/Exit/Instr/IO/Read", "I/O read.");
        HM_REG_COUNTER(&pHmCpu->StatExitIOStringWrite, "/HM/CPU%u/Exit/Instr/IO/WriteString", "String I/O write.");
        HM_REG_COUNTER(&pHmCpu->StatExitIOStringRead, "/HM/CPU%u/Exit/Instr/IO/ReadString", "String I/O read.");
        HM_REG_COUNTER(&pHmCpu->StatExitIntWindow, "/HM/CPU%u/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
        HM_REG_COUNTER(&pHmCpu->StatExitExtInt, "/HM/CPU%u/Exit/ExtInt", "Physical maskable interrupt (host).");
#endif
        HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC, "/HM/CPU%u/Exit/HostNmiInGC", "Host NMI received while in guest context.");
        HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi, "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
        HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
#ifdef VBOX_WITH_STATISTICS
        HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold, "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
        HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch, "/HM/CPU%u/Exit/TaskSwitch", "Task switch caused through task gate in IDT.");
        HM_REG_COUNTER(&pHmCpu->StatExitApicAccess, "/HM/CPU%u/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");

        HM_REG_COUNTER(&pHmCpu->StatSwitchTprMaskedIrq, "/HM/CPU%u/Switch/TprMaskedIrq", "PDMGetInterrupt() signals TPR masks pending Irq.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchGuestIrq, "/HM/CPU%u/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchPendingHostIrq, "/HM/CPU%u/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchHmToR3FF, "/HM/CPU%u/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchVmReq, "/HM/CPU%u/Switch/VmReq", "Exit to ring-3 due to pending VM requests.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchPgmPoolFlush, "/HM/CPU%u/Switch/PgmPoolFlush", "Exit to ring-3 due to pending PGM pool flush.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchDma, "/HM/CPU%u/Switch/PendingDma", "Exit to ring-3 due to pending DMA requests.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchExitToR3, "/HM/CPU%u/Switch/ExitToR3", "Exit to ring-3 (total).");
        HM_REG_COUNTER(&pHmCpu->StatSwitchLongJmpToR3, "/HM/CPU%u/Switch/LongJmpToR3", "Longjump to ring-3.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchMaxResumeLoops, "/HM/CPU%u/Switch/MaxResumeLoops", "Maximum VMRESUME inner-loop counter reached.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchHltToR3, "/HM/CPU%u/Switch/HltToR3", "HLT causing us to go to ring-3.");
        HM_REG_COUNTER(&pHmCpu->StatSwitchApicAccessToR3, "/HM/CPU%u/Switch/ApicAccessToR3", "APIC access causing us to go to ring-3.");
#endif
        HM_REG_COUNTER(&pHmCpu->StatSwitchPreempt, "/HM/CPU%u/Switch/Preempting", "EMT has been preempted while in HM context.");
#ifdef VBOX_WITH_STATISTICS
        HM_REG_COUNTER(&pHmCpu->StatSwitchNstGstVmexit, "/HM/CPU%u/Switch/NstGstVmexit", "Nested-guest VM-exit occurred.");

        HM_REG_COUNTER(&pHmCpu->StatInjectInterrupt, "/HM/CPU%u/EventInject/Interrupt", "Injected an external interrupt into the guest.");
        HM_REG_COUNTER(&pHmCpu->StatInjectXcpt, "/HM/CPU%u/EventInject/Trap", "Injected an exception into the guest.");
        HM_REG_COUNTER(&pHmCpu->StatInjectReflect, "/HM/CPU%u/EventInject/Reflect", "Reflecting an exception caused due to event injection.");
        HM_REG_COUNTER(&pHmCpu->StatInjectConvertDF, "/HM/CPU%u/EventInject/ReflectDF", "Injected a converted #DF caused due to event injection.");
        HM_REG_COUNTER(&pHmCpu->StatInjectInterpret, "/HM/CPU%u/EventInject/Interpret", "Falling back to interpreter for handling exception caused due to event injection.");
        HM_REG_COUNTER(&pHmCpu->StatInjectReflectNPF, "/HM/CPU%u/EventInject/ReflectNPF", "Reflecting event that caused an EPT violation / nested #PF.");

        HM_REG_COUNTER(&pHmCpu->StatFlushPage, "/HM/CPU%u/Flush/Page", "Invalidating a guest page on all guest CPUs.");
        HM_REG_COUNTER(&pHmCpu->StatFlushPageManual, "/HM/CPU%u/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
        HM_REG_COUNTER(&pHmCpu->StatFlushPhysPageManual, "/HM/CPU%u/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
        HM_REG_COUNTER(&pHmCpu->StatFlushTlb, "/HM/CPU%u/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
        HM_REG_COUNTER(&pHmCpu->StatFlushTlbManual, "/HM/CPU%u/Flush/TLB/Manual", "Request a full guest-TLB flush.");
        HM_REG_COUNTER(&pHmCpu->StatFlushTlbNstGst, "/HM/CPU%u/Flush/TLB/NestedGuest", "Request a nested-guest-TLB flush.");
        HM_REG_COUNTER(&pHmCpu->StatFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
        HM_REG_COUNTER(&pHmCpu->StatNoFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/Skipped", "No TLB flushing required.");
        HM_REG_COUNTER(&pHmCpu->StatFlushEntire, "/HM/CPU%u/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
        HM_REG_COUNTER(&pHmCpu->StatFlushAsid, "/HM/CPU%u/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
        HM_REG_COUNTER(&pHmCpu->StatFlushNestedPaging, "/HM/CPU%u/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
        HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgVirt, "/HM/CPU%u/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
        HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgPhys, "/HM/CPU%u/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
        HM_REG_COUNTER(&pHmCpu->StatTlbShootdown, "/HM/CPU%u/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
        HM_REG_COUNTER(&pHmCpu->StatTlbShootdownFlush, "/HM/CPU%u/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");

        HM_REG_COUNTER(&pHmCpu->StatTscParavirt, "/HM/CPU%u/TSC/Paravirt", "Paravirtualized TSC in effect.");
        HM_REG_COUNTER(&pHmCpu->StatTscOffset, "/HM/CPU%u/TSC/Offset", "TSC offsetting is in effect.");
        HM_REG_COUNTER(&pHmCpu->StatTscIntercept, "/HM/CPU%u/TSC/Intercept", "Intercept TSC accesses.");

        HM_REG_COUNTER(&pHmCpu->StatDRxArmed, "/HM/CPU%u/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
        HM_REG_COUNTER(&pHmCpu->StatDRxContextSwitch, "/HM/CPU%u/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
        HM_REG_COUNTER(&pHmCpu->StatDRxIoCheck, "/HM/CPU%u/Debug/IOCheck", "Checking for I/O breakpoint.");

        HM_REG_COUNTER(&pHmCpu->StatExportMinimal, "/HM/CPU%u/Export/Minimal", "VM-entry exporting minimal guest-state.");
        HM_REG_COUNTER(&pHmCpu->StatExportFull, "/HM/CPU%u/Export/Full", "VM-entry exporting the full guest-state.");
        HM_REG_COUNTER(&pHmCpu->StatLoadGuestFpu, "/HM/CPU%u/Export/GuestFpu", "VM-entry loading the guest-FPU state.");
        HM_REG_COUNTER(&pHmCpu->StatExportHostState, "/HM/CPU%u/Export/HostState", "VM-entry exporting host-state.");

        if (fCpuSupportsVmx)
        {
            HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRip, "/HM/CPU%u/WriteHostRIP", "Number of VMX_VMCS_HOST_RIP instructions.");
            HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRsp, "/HM/CPU%u/WriteHostRSP", "Number of VMX_VMCS_HOST_RSP instructions.");
            HM_REG_COUNTER(&pHmCpu->StatVmxVmLaunch, "/HM/CPU%u/VMLaunch", "Number of VM-entries using VMLAUNCH.");
            HM_REG_COUNTER(&pHmCpu->StatVmxVmResume, "/HM/CPU%u/VMResume", "Number of VM-entries using VMRESUME.");
        }

        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelBase, "/HM/CPU%u/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelLimit, "/HM/CPU%u/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelAttr, "/HM/CPU%u/VMXCheck/RMSelAttrs", "Could not use VMX due to unsuitable real-mode selector attributes.");

        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelBase, "/HM/CPU%u/VMXCheck/V86SelBase", "Could not use VMX due to unsuitable v8086-mode selector base.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelLimit, "/HM/CPU%u/VMXCheck/V86SelLimit", "Could not use VMX due to unsuitable v8086-mode selector limit.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelAttr, "/HM/CPU%u/VMXCheck/V86SelAttrs", "Could not use VMX due to unsuitable v8086-mode selector attributes.");

        HM_REG_COUNTER(&pHmCpu->StatVmxCheckRmOk, "/HM/CPU%u/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadSel, "/HM/CPU%u/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl, "/HM/CPU%u/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
        HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk, "/HM/CPU%u/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
#endif
        if (fCpuSupportsVmx)
        {
            HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/PreemptTimer", "VMX-preemption timer fired.");
            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadline, "/HM/CPU%u/PreemptTimer/ReusingDeadline", "VMX-preemption timer arming logic using previously calculated deadline");
            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadlineExpired, "/HM/CPU%u/PreemptTimer/ReusingDeadlineExpired", "VMX-preemption timer arming logic found previous deadline already expired (ignored)");
            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadline, "/HM/CPU%u/PreemptTimer/RecalcingDeadline", "VMX-preemption timer arming logic recalculating the deadline (slightly expensive)");
            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadlineExpired, "/HM/CPU%u/PreemptTimer/RecalcingDeadlineExpired", "VMX-preemption timer arming logic found recalculated deadline expired (ignored)");
        }
#ifdef VBOX_WITH_STATISTICS
        /*
         * Guest Exit reason stats.
         */
        if (fCpuSupportsVmx)
        {
            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
            {
                const char *pszExitName = HMGetVmxExitName(j);
                if (pszExitName)
                {
                    rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
                    AssertRCReturn(rc, rc);
                }
            }
        }
        else
        {
            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
            {
                const char *pszExitName = HMGetSvmExitName(j);
                if (pszExitName)
                {
                    rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
                    AssertRC(rc);
                }
            }
        }
        HM_REG_COUNTER(&pHmCpu->StatExitReasonNpf, "/HM/CPU%u/Exit/Reason/#NPF", "Nested page faults");

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * Nested-guest VM-exit reason stats.
         */
        if (fCpuSupportsVmx)
        {
            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
            {
                const char *pszExitName = HMGetVmxExitName(j);
                if (pszExitName)
                {
                    rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
                    AssertRC(rc);
                }
            }
        }
        else
        {
            for (int j = 0; j < MAX_EXITREASON_STAT; j++)
            {
                const char *pszExitName = HMGetSvmExitName(j);
                if (pszExitName)
                {
                    rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
                                         STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
                    AssertRC(rc);
                }
            }
        }
        HM_REG_COUNTER(&pHmCpu->StatNestedExitReasonNpf, "/HM/CPU%u/Exit/NestedGuest/Reason/#NPF", "Nested page faults");
#endif

        /*
         * Injected interrupts stats.
         */
        char szDesc[64];
        for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedIrqs); j++)
        {
            RTStrPrintf(&szDesc[0], sizeof(szDesc), "Interrupt %u", j);
            rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
                                 STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectIntr/%02X", idCpu, j);
            AssertRC(rc);
        }

        /*
         * Injected exception stats.
         */
        for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedXcpts); j++)
        {
            RTStrPrintf(&szDesc[0], sizeof(szDesc), "%s exception", hmR3GetXcptName(j));
            rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedXcpts[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
                                 STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectXcpt/%02X", idCpu, j);
            AssertRC(rc);
        }

#endif /* VBOX_WITH_STATISTICS */
#undef HM_REG_COUNTER
#undef HM_REG_PROFILE
#undef HM_REG_STAT
    }

    return VINF_SUCCESS;
}


/**
 * Called when an init phase has completed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     The phase that completed.
 */
VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    switch (enmWhat)
    {
        case VMINITCOMPLETED_RING3:
            return hmR3InitFinalizeR3(pVM);
        case VMINITCOMPLETED_RING0:
            return hmR3InitFinalizeR0(pVM);
        default:
            return VINF_SUCCESS;
    }
}


/**
 * Turns off normal raw mode features.
 *
 * @param   pVM     The cross context VM structure.
 */
static void hmR3DisableRawMode(PVM pVM)
{
/** @todo r=bird: HM shouldn't be doing this crap. */
    /* Reinit the paging mode to force the new shadow mode. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
    }
}


/**
 * Initialize VT-x or AMD-V.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int hmR3InitFinalizeR0(PVM pVM)
{
    int rc;

    if (!HMIsEnabled(pVM))
        return VINF_SUCCESS;

    /*
     * Hack to allow users to work around broken BIOSes that incorrectly set
     * EFER.SVME, which makes us believe somebody else is already using AMD-V.
     */
    if (   !pVM->hm.s.vmx.fSupported
        && !pVM->hm.s.svm.fSupported
        && pVM->hm.s.ForR3.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
        && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
    {
        LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
        pVM->hm.s.svm.fSupported        = true;
        pVM->hm.s.svm.fIgnoreInUseError = true;
        pVM->hm.s.ForR3.rcInit          = VINF_SUCCESS;
    }
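    /* Illustrative usage of the hack above ("My VM" is a placeholder VM name);
       only the variable's existence matters to RTEnvExist, not its value:
           VBOX_HWVIRTEX_IGNORE_SVM_IN_USE=1 VirtualBoxVM --startvm "My VM" */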

    /*
     * Report ring-0 init errors.
     */
    if (   !pVM->hm.s.vmx.fSupported
        && !pVM->hm.s.svm.fSupported)
    {
        LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.ForR3.rcInit));
        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.ForR3.vmx.u64HostFeatCtrl));
        switch (pVM->hm.s.ForR3.rcInit)
        {
            case VERR_VMX_IN_VMX_ROOT_MODE:
                return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor");
            case VERR_VMX_NO_VMX:
                return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available");
            case VERR_VMX_MSR_VMX_DISABLED:
                return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_DISABLED, "VT-x is disabled in the BIOS");
            case VERR_VMX_MSR_ALL_VMX_DISABLED:
                return VM_SET_ERROR(pVM, VERR_VMX_MSR_ALL_VMX_DISABLED, "VT-x is disabled in the BIOS for all CPU modes");
            case VERR_VMX_MSR_LOCKING_FAILED:
                return VM_SET_ERROR(pVM, VERR_VMX_MSR_LOCKING_FAILED, "Failed to lock VT-x features while trying to enable VT-x");
            case VERR_VMX_MSR_VMX_ENABLE_FAILED:
                return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_ENABLE_FAILED, "Failed to enable VT-x features");
            case VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED:
                return VM_SET_ERROR(pVM, VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED, "Failed to enable VT-x features in SMX mode");

            case VERR_SVM_IN_USE:
                return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor");
            case VERR_SVM_NO_SVM:
                return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available");
            case VERR_SVM_DISABLED:
                return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
        }
        return VMSetError(pVM, pVM->hm.s.ForR3.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.ForR3.rcInit);
    }

    /*
     * Enable VT-x or AMD-V on all host CPUs.
     */
    rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
    if (RT_FAILURE(rc))
    {
        LogRel(("HM: Failed to enable, error %Rrc\n", rc));
        HMR3CheckError(pVM, rc);
        return rc;
    }

    /*
     * No TPR patching is required when the IO-APIC is not enabled for this VM.
     * (Main should have taken care of this already)
     */
    if (!PDMHasIoApic(pVM))
    {
        Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
        pVM->hm.s.fTprPatchingAllowed = false;
    }

    LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
            pVM->hm.s.ForR3.fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
            pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));

    /*
     * Do the vendor specific initialization.
     *
     * Note! We disable release log buffering here since we're doing a relatively
     *       large amount of logging and don't want to hit the disk with each
     *       LogRel statement.
     */
    AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
    bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
    if (pVM->hm.s.vmx.fSupported)
        rc = hmR3InitFinalizeR0Intel(pVM);
    else
        rc = hmR3InitFinalizeR0Amd(pVM);
    LogRel((pVM->hm.s.fGlobalInit ? "HM: VT-x/AMD-V init method: Global\n"
                                  : "HM: VT-x/AMD-V init method: Local\n"));
    RTLogRelSetBuffering(fOldBuffered);
    pVM->hm.s.fInitialized = true;

    return rc;
}


/**
 * @callback_method_impl{FNPDMVMMDEVHEAPNOTIFY}
 */
static DECLCALLBACK(void) hmR3VmmDevHeapNotify(PVM pVM, void *pvAllocation, RTGCPHYS GCPhysAllocation)
{
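    /* Intentionally empty: there is currently nothing to do when the VMMDev
       heap moves, so the parameters are simply marked as unused. Presumably
       the callback is registered for heap allocations made elsewhere in this
       file (an assumption; the registration is not part of this excerpt). */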
    NOREF(pVM);
    NOREF(pvAllocation);
    NOREF(GCPhysAllocation);
}


/**
 * Returns a description of the VMCS (and associated regions') memory type given the
 * IA32_VMX_BASIC MSR.
 *
 * @returns The descriptive memory type.
 * @param   uMsrVmxBasic    IA32_VMX_BASIC MSR value.
 */
static const char *hmR3VmxGetMemTypeDesc(uint64_t uMsrVmxBasic)
{
    uint8_t const uMemType = RT_BF_GET(uMsrVmxBasic, VMX_BF_BASIC_VMCS_MEM_TYPE);
    switch (uMemType)
    {
        case VMX_BASIC_MEM_TYPE_WB: return "Write Back (WB)";
        case VMX_BASIC_MEM_TYPE_UC: return "Uncacheable (UC)";
    }
    return "Unknown";
}


/**
 * Returns a single-line description of all the activity-states supported by the CPU
 * given the IA32_VMX_MISC MSR.
 *
 * @returns All supported activity states.
 * @param   uMsrMisc    IA32_VMX_MISC MSR value.
 */
static const char *hmR3VmxGetActivityStateAllDesc(uint64_t uMsrMisc)
{
    static const char * const s_apszActStates[] =
    {
        "",
        " ( HLT )",
        " ( SHUTDOWN )",
        " ( HLT SHUTDOWN )",
        " ( SIPI_WAIT )",
        " ( HLT SIPI_WAIT )",
        " ( SHUTDOWN SIPI_WAIT )",
        " ( HLT SHUTDOWN SIPI_WAIT )"
    };
    uint8_t const idxActStates = RT_BF_GET(uMsrMisc, VMX_BF_MISC_ACTIVITY_STATES);
    Assert(idxActStates < RT_ELEMENTS(s_apszActStates));
    return s_apszActStates[idxActStates];
}
1192
1193
1194/**
1195 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
1196 *
1197 * @param fFeatMsr The feature control MSR value.
1198 */
1199static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
1200{
1201 uint64_t const val = fFeatMsr;
1202 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val));
1203 HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
1204 HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
1205 HMVMX_REPORT_MSR_CAP(val, "VMXON", MSR_IA32_FEATURE_CONTROL_VMXON);
1206 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN0", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_0);
1207 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN1", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_1);
1208 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN2", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_2);
1209 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN3", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_3);
1210 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN4", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_4);
1211 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN5", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_5);
1212 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN6", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_6);
1213 HMVMX_REPORT_MSR_CAP(val, "SENTER_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SENTER_GLOBAL_EN);
1214 HMVMX_REPORT_MSR_CAP(val, "SGX_LAUNCH_EN", MSR_IA32_FEATURE_CONTROL_SGX_LAUNCH_EN);
1215 HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
1216 HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE);
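 /* Firmware normally sets and locks this MSR during POST; with the lock bit clear, ring-0
 code may still be able to configure and lock it itself (hence only a warning below). */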
1217 if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
1218 LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
1219}
1220
1221
1222/**
1223 * Reports MSR_IA32_VMX_BASIC MSR to the log.
1224 *
1225 * @param uBasicMsr The VMX basic MSR value.
1226 */
1227static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
1228{
1229 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr));
1230 LogRel(("HM: VMCS id = %#x\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_ID)));
1231 LogRel(("HM: VMCS size = %u bytes\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_SIZE)));
1232 LogRel(("HM: VMCS physical address limit = %s\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_PHYSADDR_WIDTH) ?
1233 "< 4 GB" : "None"));
1234 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr)));
1235 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_DUAL_MON)));
1236 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_INS_OUTS)));
1237 LogRel(("HM: Supports true-capability MSRs = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_TRUE_CTLS)));
1238 LogRel(("HM: VM-entry Xcpt error-code optional = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_XCPT_ERRCODE)));
1239}
1240
1241
1242/**
1243 * Reports MSR_IA32_VMX_PINBASED_CTLS MSR to the log.
1244 *
1245 * @param pVmxMsr Pointer to the VMX MSR.
1246 */
1247static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1248{
1249 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1250 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1251 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
1252 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
1253 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
1254 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRTUAL_NMI", VMX_PIN_CTLS_VIRT_NMI);
1255 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PREEMPT_TIMER", VMX_PIN_CTLS_PREEMPT_TIMER);
1256 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "POSTED_INT", VMX_PIN_CTLS_POSTED_INT);
1257}
1258
1259
1260/**
1261 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
1262 *
1263 * @param pVmxMsr Pointer to the VMX MSR.
1264 */
1265static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1266{
1267 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1268 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1269 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
1270 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INT_WINDOW_EXIT", VMX_PROC_CTLS_INT_WINDOW_EXIT);
1271 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TSC_OFFSETTING", VMX_PROC_CTLS_USE_TSC_OFFSETTING);
1272 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HLT_EXIT", VMX_PROC_CTLS_HLT_EXIT);
1273 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVLPG_EXIT", VMX_PROC_CTLS_INVLPG_EXIT);
1274 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MWAIT_EXIT", VMX_PROC_CTLS_MWAIT_EXIT);
1275 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDPMC_EXIT", VMX_PROC_CTLS_RDPMC_EXIT);
1276 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSC_EXIT", VMX_PROC_CTLS_RDTSC_EXIT);
1277 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_LOAD_EXIT", VMX_PROC_CTLS_CR3_LOAD_EXIT);
1278 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_STORE_EXIT", VMX_PROC_CTLS_CR3_STORE_EXIT);
1279 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TERTIARY_CTLS", VMX_PROC_CTLS_USE_TERTIARY_CTLS);
1280 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_LOAD_EXIT", VMX_PROC_CTLS_CR8_LOAD_EXIT);
1281 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_STORE_EXIT", VMX_PROC_CTLS_CR8_STORE_EXIT);
1282 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TPR_SHADOW", VMX_PROC_CTLS_USE_TPR_SHADOW);
1283 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_WINDOW_EXIT", VMX_PROC_CTLS_NMI_WINDOW_EXIT);
1284 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MOV_DR_EXIT", VMX_PROC_CTLS_MOV_DR_EXIT);
1285 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNCOND_IO_EXIT", VMX_PROC_CTLS_UNCOND_IO_EXIT);
1286 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_IO_BITMAPS", VMX_PROC_CTLS_USE_IO_BITMAPS);
1287 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_TRAP_FLAG", VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
1288 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_MSR_BITMAPS", VMX_PROC_CTLS_USE_MSR_BITMAPS);
1289 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_EXIT", VMX_PROC_CTLS_MONITOR_EXIT);
1290 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_EXIT", VMX_PROC_CTLS_PAUSE_EXIT);
1291 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_SECONDARY_CTLS", VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1292}
1293
1294
1295/**
1296 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
1297 *
1298 * @param pVmxMsr Pointer to the VMX MSR.
1299 */
1300static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
1301{
1302 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1303 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1304 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
1305 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_APIC_ACCESS", VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
1306 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT", VMX_PROC_CTLS2_EPT);
1307 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DESC_TABLE_EXIT", VMX_PROC_CTLS2_DESC_TABLE_EXIT);
1308 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSCP", VMX_PROC_CTLS2_RDTSCP);
1309 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_X2APIC_MODE", VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
1310 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VPID", VMX_PROC_CTLS2_VPID);
1311 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "WBINVD_EXIT", VMX_PROC_CTLS2_WBINVD_EXIT);
1312 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNRESTRICTED_GUEST", VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1313 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "APIC_REG_VIRT", VMX_PROC_CTLS2_APIC_REG_VIRT);
1314 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_INT_DELIVERY", VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
1315 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_LOOP_EXIT", VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
1316 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDRAND_EXIT", VMX_PROC_CTLS2_RDRAND_EXIT);
1317 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVPCID", VMX_PROC_CTLS2_INVPCID);
1318 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMFUNC", VMX_PROC_CTLS2_VMFUNC);
1319 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMCS_SHADOWING", VMX_PROC_CTLS2_VMCS_SHADOWING);
1320 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLS_EXIT", VMX_PROC_CTLS2_ENCLS_EXIT);
1321 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDSEED_EXIT", VMX_PROC_CTLS2_RDSEED_EXIT);
1322 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PML", VMX_PROC_CTLS2_PML);
1323 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT_XCPT_VE", VMX_PROC_CTLS2_EPT_XCPT_VE);
1324 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
1325 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "XSAVES_XRSTORS", VMX_PROC_CTLS2_XSAVES_XRSTORS);
1326 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MODE_BASED_EPT_PERM", VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
1327 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SPP_EPT", VMX_PROC_CTLS2_SPP_EPT);
1328 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PT_EPT", VMX_PROC_CTLS2_PT_EPT);
1329 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "TSC_SCALING", VMX_PROC_CTLS2_TSC_SCALING);
1330 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USER_WAIT_PAUSE", VMX_PROC_CTLS2_USER_WAIT_PAUSE);
1331 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLV_EXIT", VMX_PROC_CTLS2_ENCLV_EXIT);
1332}
1333
1334
1335/**
1336 * Reports MSR_IA32_VMX_PROCBASED_CTLS3 MSR to the log.
1337 *
1338 * @param uProcCtls3 The tertiary processor-based VM-execution control MSR.
1339 */
1340static void hmR3VmxReportProcBasedCtls3Msr(uint64_t uProcCtls3)
1341{
1342 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS3 = %#RX64\n", uProcCtls3));
1343 LogRel(("HM: LOADIWKEY_EXIT = %RTbool\n", RT_BOOL(uProcCtls3 & VMX_PROC_CTLS3_LOADIWKEY_EXIT)));
1344}
1345
1346
1347/**
1348 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
1349 *
1350 * @param pVmxMsr Pointer to the VMX MSR.
1351 */
1352static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1353{
1354 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1355 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1356 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u));
1357 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_DEBUG", VMX_ENTRY_CTLS_LOAD_DEBUG);
1358 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "IA32E_MODE_GUEST", VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
1359 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENTRY_TO_SMM", VMX_ENTRY_CTLS_ENTRY_TO_SMM);
1360 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DEACTIVATE_DUAL_MON", VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
1361 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_ENTRY_CTLS_LOAD_PERF_MSR);
1362 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_ENTRY_CTLS_LOAD_PAT_MSR);
1363 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_ENTRY_CTLS_LOAD_EFER_MSR);
1364 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_BNDCFGS_MSR", VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR);
1365 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
1366 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_RTIT_CTL_MSR", VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR);
1367 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_ENTRY_CTLS_LOAD_CET_STATE);
1368 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_ENTRY_CTLS_LOAD_PKRS_MSR);
1369}
1370
1371
1372/**
1373 * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
1374 *
1375 * @param pVmxMsr Pointer to the VMX MSR.
1376 */
1377static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1378{
1379 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1380 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1381 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u));
1382 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_DEBUG", VMX_EXIT_CTLS_SAVE_DEBUG);
1383 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HOST_ADDR_SPACE_SIZE", VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1384 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_EXIT_CTLS_LOAD_PERF_MSR);
1385 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ACK_EXT_INT", VMX_EXIT_CTLS_ACK_EXT_INT);
1386 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PAT_MSR", VMX_EXIT_CTLS_SAVE_PAT_MSR);
1387 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_EXIT_CTLS_LOAD_PAT_MSR);
1388 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_EFER_MSR", VMX_EXIT_CTLS_SAVE_EFER_MSR);
1389 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_EXIT_CTLS_LOAD_EFER_MSR);
1390 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PREEMPT_TIMER", VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1391 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_BNDCFGS_MSR", VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR);
1392 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT);
1393 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_RTIT_CTL_MSR", VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR);
1394 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_EXIT_CTLS_LOAD_CET_STATE);
1395 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_EXIT_CTLS_LOAD_PKRS_MSR);
1396}
1397
1398
1399/**
1400 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
1401 *
1402 * @param fCaps The VMX EPT/VPID capability MSR value.
1403 */
1404static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
1405{
1406 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
1407 HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
1408 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
1409 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_5", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_5);
1410 HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_UC", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_UC);
1411 HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_WB", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB);
1412 HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
1413 HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
1414 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
1415 HMVMX_REPORT_MSR_CAP(fCaps, "ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY);
1416 HMVMX_REPORT_MSR_CAP(fCaps, "ADVEXITINFO_EPT_VIOLATION", MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION);
1417 HMVMX_REPORT_MSR_CAP(fCaps, "SUPER_SHW_STACK", MSR_IA32_VMX_EPT_VPID_CAP_SUPER_SHW_STACK);
1418 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
1419 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1420 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
1421 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1422 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
1423 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
1424 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
1425}
1426
1427
1428/**
1429 * Reports MSR_IA32_VMX_MISC MSR to the log.
1430 *
1431 * @param pVM Pointer to the VM.
1432 * @param fMisc The VMX misc. MSR value.
1433 */
1434static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
1435{
1436 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc));
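 /* The VMX-preemption timer counts down at the TSC rate shifted right by PREEMPT_TIMER_TSC. */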
1437 uint8_t const cPreemptTimerShift = RT_BF_GET(fMisc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
1438 if (cPreemptTimerShift == pVM->hm.s.vmx.cPreemptTimerShift)
1439 LogRel(("HM: PREEMPT_TIMER_TSC = %#x\n", cPreemptTimerShift));
1440 else
1441 {
1442 LogRel(("HM: PREEMPT_TIMER_TSC = %#x - erratum detected, using %#x instead\n", cPreemptTimerShift,
1443 pVM->hm.s.vmx.cPreemptTimerShift));
1444 }
1445 LogRel(("HM: EXIT_SAVE_EFER_LMA = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_EXIT_SAVE_EFER_LMA)));
1446 LogRel(("HM: ACTIVITY_STATES = %#x%s\n", RT_BF_GET(fMisc, VMX_BF_MISC_ACTIVITY_STATES),
1447 hmR3VmxGetActivityStateAllDesc(fMisc)));
1448 LogRel(("HM: INTEL_PT = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_INTEL_PT)));
1449 LogRel(("HM: SMM_READ_SMBASE_MSR = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_SMM_READ_SMBASE_MSR)));
1450 LogRel(("HM: CR3_TARGET = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_CR3_TARGET)));
1451 LogRel(("HM: MAX_MSR = %#x ( %u )\n", RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS),
1452 VMX_MISC_MAX_MSRS(fMisc)));
1453 LogRel(("HM: VMXOFF_BLOCK_SMI = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMXOFF_BLOCK_SMI)));
1454 LogRel(("HM: VMWRITE_ALL = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMWRITE_ALL)));
1455 LogRel(("HM: ENTRY_INJECT_SOFT_INT = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_ENTRY_INJECT_SOFT_INT)));
1456 LogRel(("HM: MSEG_ID = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_MSEG_ID)));
1457}
1458
1459
1460/**
1461 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
1462 *
1463 * @param uVmcsEnum The VMX VMCS enum MSR value.
1464 */
1465static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
1466{
1467 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", uVmcsEnum));
1468 LogRel(("HM: HIGHEST_IDX = %#x\n", RT_BF_GET(uVmcsEnum, VMX_BF_VMCS_ENUM_HIGHEST_IDX)));
1469}
1470
1471
1472/**
1473 * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
1474 *
1475 * @param uVmFunc The VMX VMFUNC MSR value.
1476 */
1477static void hmR3VmxReportVmFuncMsr(uint64_t uVmFunc)
1478{
1479 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc));
1480 HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", RT_BF_GET(uVmFunc, VMX_BF_VMFUNC_EPTP_SWITCHING));
1481}
1482
1483
1484/**
1485 * Reports VMX CR0, CR4 fixed MSRs.
1486 *
1487 * @param pMsrs Pointer to the VMX MSRs.
1488 */
1489static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
1490{
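 /* Bits set in FIXED0 must be 1 and bits clear in FIXED1 must be 0 in the corresponding
 control register while in VMX operation. */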
1491 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0));
1492 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1));
1493 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0));
1494 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1));
1495}
1496
1497
1498/**
1499 * Finish VT-x initialization (after ring-0 init).
1500 *
1501 * @returns VBox status code.
1502 * @param pVM The cross context VM structure.
1503 */
1504static int hmR3InitFinalizeR0Intel(PVM pVM)
1505{
1506 int rc;
1507
1508 LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
1509 AssertLogRelReturn(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl != 0, VERR_HM_IPE_4);
1510
1511 LogRel(("HM: Using VT-x implementation 3.0\n"));
1512 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1513 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostCr4));
1514 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostMsrEfer));
1515 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl));
1516
1517 hmR3VmxReportFeatCtlMsr(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl);
1518 hmR3VmxReportBasicMsr(pVM->hm.s.ForR3.vmx.Msrs.u64Basic);
1519
1520 hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.PinCtls);
1521 hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls);
1522 if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1523 hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2);
1524 if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1525 hmR3VmxReportProcBasedCtls3Msr(pVM->hm.s.ForR3.vmx.Msrs.u64ProcCtls3);
1526
1527 hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.EntryCtls);
1528 hmR3VmxReportExitCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ExitCtls);
1529
1530 if (RT_BF_GET(pVM->hm.s.ForR3.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1531 {
1532 /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
1533 LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TruePinCtls));
1534 LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueProcCtls));
1535 LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueEntryCtls));
1536 LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueExitCtls));
1537 }
1538
1539 hmR3VmxReportMiscMsr(pVM, pVM->hm.s.ForR3.vmx.Msrs.u64Misc);
1540 hmR3VmxReportVmcsEnumMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmcsEnum);
1541 if (pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps)
1542 hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps);
1543 if (pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc)
1544 hmR3VmxReportVmFuncMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc);
1545 hmR3VmxReportCrFixedMsrs(&pVM->hm.s.ForR3.vmx.Msrs);
1546
1547#ifdef TODO_9217_VMCSINFO
1548 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1549 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1550 {
1551 PCVMXVMCSINFOSHARED pVmcsInfo = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfo;
1552 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
1553 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysVmcs));
1554 }
1555#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1556 if (pVM->cpum.ro.GuestFeatures.fVmx)
1557 {
1558 LogRel(("HM: Nested-guest:\n"));
1559 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1560 {
1561 PCVMXVMCSINFOSHARED pVmcsInfoNstGst = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfoNstGst;
1562 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysMsrBitmap));
1563 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysVmcs));
1564 }
1565 }
1566#endif
1567#endif /* TODO_9217_VMCSINFO */
1568
1569 /*
1570 * EPT and unrestricted guest execution are determined in HMR3Init, verify the sanity of that.
1571 */
1572 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1573 || (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
1574 VERR_HM_IPE_1);
1575 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuestCfg
1576 || ( (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
1577 && pVM->hm.s.fNestedPagingCfg),
1578 VERR_HM_IPE_1);
1579
1580 /*
1581 * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1582 * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
1583 * with Nehalem and secondary VM-execution controls should be supported on all of them, but nonetheless it's Intel...
1584 */
1585 if ( !(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1586 && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1587 {
1588 CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1589 LogRel(("HM: Disabled RDTSCP\n"));
1590 }
1591
1592 if (!pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1593 {
1594 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
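 /* Layout: TSS + interrupt redirection bitmap, followed by the I/O bitmap; a further page
 holds the identity-mapped page directory set up below. */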
1595 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, hmR3VmmDevHeapNotify, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1596 if (RT_SUCCESS(rc))
1597 {
1598 /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
1599 Refer Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
1600 esp. Figure 20-5.*/
1601 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1602 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1603
1604 /* Bit set to 0 means software interrupts are redirected to the
1605 8086 program interrupt handler rather than switching to
1606 protected-mode handler. */
1607 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1608
1609 /* Allow all port IO, so that port IO instructions do not cause
1610 exceptions and would instead cause a VM-exit (based on VT-x's
1611 IO bitmap which we currently configure to always cause an exit). */
1612 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
1613 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
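 /* The I/O permission bitmap must be terminated by an all-bits-set byte, which is what
 the 0xff store above provides. */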
1614
1615 /*
1616 * Construct a 1024 element page directory with 4 MB pages for the identity mapped
1617 * page table used in real and protected mode without paging with EPT.
1618 */
1619 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1620 for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
1621 {
1622 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1623 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1624 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1625 | X86_PDE4M_G;
1626 }
1627
1628 /* We convert it here every time as PCI regions could be reconfigured. */
1629 if (PDMVmmDevHeapIsEnabled(pVM))
1630 {
1631 RTGCPHYS GCPhys;
1632 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1633 AssertRCReturn(rc, rc);
1634 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
1635
1636 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1637 AssertRCReturn(rc, rc);
1638 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
1639 }
1640 }
1641 else
1642 {
1643 LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1644 pVM->hm.s.vmx.pRealModeTSS = NULL;
1645 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1646 return VMSetError(pVM, rc, RT_SRC_POS,
1647 "HM failure: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)", rc);
1648 }
1649 }
1650
1651 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1652 : "HM: Guest support: 32-bit only\n"));
1653
1654 /*
1655 * Call ring-0 to set up the VM.
1656 */
1657 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
1658 if (rc != VINF_SUCCESS)
1659 {
1660 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1661 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1662 {
1663 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1664 LogRel(("HM: CPU[%u] Last instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
1665 LogRel(("HM: CPU[%u] HM error %#x (%u)\n", idCpu, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
1666 }
1667 HMR3CheckError(pVM, rc);
1668 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1669 }
1670
1671 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer));
1672 LogRel(("HM: Enabled VMX\n"));
1673 pVM->hm.s.vmx.fEnabled = true;
1674
1675 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1676
1677 /*
1678 * Change the CPU features.
1679 */
1680 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1681 if (pVM->hm.s.fAllow64BitGuestsCfg)
1682 {
1683 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1684 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1685 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* (Long mode only on Intel CPUs.) */
1686 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1687 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1688 }
1689 /* Given that we're on a long mode host, we can simply enable NX for PAE capable guests. */
1690 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1691 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1692
1693 /*
1694 * Log configuration details.
1695 */
1696 if (pVM->hm.s.fNestedPagingCfg)
1697 {
1698 LogRel(("HM: Enabled nested paging\n"));
1699 if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
1700 LogRel(("HM: EPT flush type = Single context\n"));
1701 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1702 LogRel(("HM: EPT flush type = All contexts\n"));
1703 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
1704 LogRel(("HM: EPT flush type = Not supported\n"));
1705 else
1706 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushEpt));
1707
1708 if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1709 LogRel(("HM: Enabled unrestricted guest execution\n"));
1710
1711 if (pVM->hm.s.fLargePages)
1712 {
1713 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1714 PGMSetLargePageUsage(pVM, true);
1715 LogRel(("HM: Enabled large page support\n"));
1716 }
1717 }
1718 else
1719 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
1720
1721 if (pVM->hm.s.ForR3.vmx.fVpid)
1722 {
1723 LogRel(("HM: Enabled VPID\n"));
1724 if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
1725 LogRel(("HM: VPID flush type = Individual addresses\n"));
1726 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
1727 LogRel(("HM: VPID flush type = Single context\n"));
1728 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1729 LogRel(("HM: VPID flush type = All contexts\n"));
1730 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1731 LogRel(("HM: VPID flush type = Single context retain globals\n"));
1732 else
1733 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushVpid));
1734 }
1735 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
1736 LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
1737
1738 if (pVM->hm.s.vmx.fUsePreemptTimerCfg)
1739 LogRel(("HM: Enabled VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
1740 else
1741 LogRel(("HM: Disabled VMX-preemption timer\n"));
1742
1743 if (pVM->hm.s.fVirtApicRegs)
1744 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1745
1746 if (pVM->hm.s.fPostedIntrs)
1747 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1748
1749 if (pVM->hm.s.ForR3.vmx.fUseVmcsShadowing)
1750 {
1751 bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.ForR3.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
1752 LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
1753 }
1754
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Finish AMD-V initialization (after ring-0 init).
1761 *
1762 * @returns VBox status code.
1763 * @param pVM The cross context VM structure.
1764 */
1765static int hmR3InitFinalizeR0Amd(PVM pVM)
1766{
1767 LogFunc(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1768
1769 LogRel(("HM: Using AMD-V implementation 2.0\n"));
1770
1771 uint32_t u32Family;
1772 uint32_t u32Model;
1773 uint32_t u32Stepping;
1774 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
1775 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
1776 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1777 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.ForR3.svm.u64MsrHwcr));
1778 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.ForR3.svm.u32Rev));
1779 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.ForR3.uMaxAsid));
1780 LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.ForR3.svm.fFeatures));
1781
1782 /*
1783 * Enumerate AMD-V features.
1784 */
1785 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1786 {
1787#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
1788 HMSVM_REPORT_FEATURE("NESTED_PAGING", X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1789 HMSVM_REPORT_FEATURE("LBR_VIRT", X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1790 HMSVM_REPORT_FEATURE("SVM_LOCK", X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1791 HMSVM_REPORT_FEATURE("NRIP_SAVE", X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1792 HMSVM_REPORT_FEATURE("TSC_RATE_MSR", X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1793 HMSVM_REPORT_FEATURE("VMCB_CLEAN", X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1794 HMSVM_REPORT_FEATURE("FLUSH_BY_ASID", X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1795 HMSVM_REPORT_FEATURE("DECODE_ASSISTS", X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS),
1796 HMSVM_REPORT_FEATURE("PAUSE_FILTER", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1797 HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
1798 HMSVM_REPORT_FEATURE("AVIC", X86_CPUID_SVM_FEATURE_EDX_AVIC),
1799 HMSVM_REPORT_FEATURE("VIRT_VMSAVE_VMLOAD", X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD),
1800 HMSVM_REPORT_FEATURE("VGIF", X86_CPUID_SVM_FEATURE_EDX_VGIF),
1801 HMSVM_REPORT_FEATURE("GMET", X86_CPUID_SVM_FEATURE_EDX_GMET),
1802#undef HMSVM_REPORT_FEATURE
1803 };
1804
1805 uint32_t fSvmFeatures = pVM->hm.s.ForR3.svm.fFeatures;
1806 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1807 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1808 {
1809 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1810 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1811 }
1812 if (fSvmFeatures)
1813 for (unsigned iBit = 0; iBit < 32; iBit++)
1814 if (RT_BIT_32(iBit) & fSvmFeatures)
1815 LogRel(("HM: Reserved bit %u\n", iBit));
1816
1817 /*
1818 * Nested paging is determined in HMR3Init, verify the sanity of that.
1819 */
1820 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1821 || (pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1822 VERR_HM_IPE_1);
1823
1824#if 0
1825 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
1826 * here. */
1827 if (RTR0IsPostIpiSupport())
1828 pVM->hm.s.fPostedIntrs = true;
1829#endif
1830
1831 /*
1832 * Determine whether we need to intercept #UD in SVM mode for emulating
1833 * Intel SYSENTER/SYSEXIT on AMD64, as these instructions result in #UD
1834 * when executed in long mode. This is only really applicable when
1835 * non-default CPU profiles are in effect, i.e. guest vendor differs
1836 * from the host one.
1837 */
1838 if (CPUMGetGuestCpuVendor(pVM) != CPUMGetHostCpuVendor(pVM))
1839 switch (CPUMGetGuestCpuVendor(pVM))
1840 {
1841 case CPUMCPUVENDOR_INTEL:
1842 case CPUMCPUVENDOR_VIA: /*?*/
1843 case CPUMCPUVENDOR_SHANGHAI: /*?*/
1844 switch (CPUMGetHostCpuVendor(pVM))
1845 {
1846 case CPUMCPUVENDOR_AMD:
1847 case CPUMCPUVENDOR_HYGON:
1848 if (pVM->hm.s.fAllow64BitGuestsCfg)
1849 {
1850 LogRel(("HM: Intercepting #UD for emulating SYSENTER/SYSEXIT in long mode.\n"));
1851 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1852 pVM->apCpusR3[idCpu]->hm.s.svm.fEmulateLongModeSysEnterExit = true;
1853 }
1854 break;
1855 default: break;
1856 }
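 /* (No break: execution falls through to the outer default, which is a no-op.) */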
1857 default: break;
1858 }
1859
1860 /*
1861 * Call ring-0 to set up the VM.
1862 */
1863 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1864 if (rc != VINF_SUCCESS)
1865 {
1866 AssertMsgFailed(("%Rrc\n", rc));
1867 LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
1868 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1869 }
1870
1871 LogRel(("HM: Enabled SVM\n"));
1872 pVM->hm.s.svm.fEnabled = true;
1873
1874 if (pVM->hm.s.fNestedPagingCfg)
1875 {
1876 LogRel(("HM: Enabled nested paging\n"));
1877
1878 /*
1879 * Enable large pages (2 MB) if applicable.
1880 */
1881 if (pVM->hm.s.fLargePages)
1882 {
1883 PGMSetLargePageUsage(pVM, true);
1884 LogRel(("HM: Enabled large page support\n"));
1885 }
1886 }
1887
1888 if (pVM->hm.s.fVirtApicRegs)
1889 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1890
1891 if (pVM->hm.s.fPostedIntrs)
1892 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1893
1894 hmR3DisableRawMode(pVM);
1895
1896 /*
1897 * Change the CPU features.
1898 */
1899 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1900 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1901 if (pVM->hm.s.fAllow64BitGuestsCfg)
1902 {
1903 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1904 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1905 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1906 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1907 }
1908 /* Turn on NXE if PAE has been enabled. */
1909 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1910 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1911
1912 LogRel((pVM->hm.s.fTprPatchingAllowed ? "HM: Enabled TPR patching\n"
1913 : "HM: Disabled TPR patching\n"));
1914
1915 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1916 : "HM: Guest support: 32-bit only\n"));
1917 return VINF_SUCCESS;
1918}
1919
1920
1921/**
1922 * Applies relocations to data and code managed by this
1923 * component. This function will be called at init and
1924 * whenever the VMM needs to relocate itself inside the GC.
1925 *
1926 * @param pVM The cross context VM structure.
1927 */
1928VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1929{
1930 /* Fetch the current paging mode during the relocate callback during state loading. */
1931 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1932 {
1933 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1934 {
1935 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1936 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1937 }
1938 }
1939}
1940
1941
1942/**
1943 * Terminates the HM.
1944 *
1945 * Termination means cleaning up and freeing all resources,
1946 * the VM itself is, at this point, powered off or suspended.
1947 *
1948 * @returns VBox status code.
1949 * @param pVM The cross context VM structure.
1950 */
1951VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
1952{
1953 if (pVM->hm.s.vmx.pRealModeTSS)
1954 {
1955 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1956 pVM->hm.s.vmx.pRealModeTSS = NULL;
1957 }
1958 hmR3TermCPU(pVM);
1959 return VINF_SUCCESS;
1960}
1961
1962
1963/**
1964 * Terminates the per-VCPU HM.
1965 *
1966 * @returns VBox status code.
1967 * @param pVM The cross context VM structure.
1968 */
1969static int hmR3TermCPU(PVM pVM)
1970{
1971 RT_NOREF(pVM);
1972 return VINF_SUCCESS;
1973}
1974
1975
1976/**
1977 * Resets a virtual CPU.
1978 *
1979 * Used by HMR3Reset and CPU hot plugging.
1980 *
1981 * @param pVCpu The cross context virtual CPU structure to reset.
1982 */
1983VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
1984{
1985 /* Sync. entire state on VM reset ring-0 re-entry. It's safe to reset
1986 the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */
1987 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
1988
1989 pVCpu->hm.s.fActive = false;
1990 pVCpu->hm.s.Event.fPending = false;
1991 pVCpu->hm.s.vmx.u64GstMsrApicBase = 0;
1992 pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true;
1993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1994 if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
1995 pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true;
1996#endif
1997}
1998
1999
2000/**
2001 * The VM is being reset.
2002 *
2003 * For the HM component this means that any GDT/LDT/TSS monitors
2004 * need to be removed.
2005 *
2006 * @param pVM The cross context VM structure.
2007 */
2008VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
2009{
2010 LogFlow(("HMR3Reset:\n"));
2011
2012 if (HMIsEnabled(pVM))
2013 hmR3DisableRawMode(pVM);
2014
2015 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2016 HMR3ResetCpu(pVM->apCpusR3[idCpu]);
2017
2018 /* Clear all patch information. */
2019 pVM->hm.s.pGuestPatchMem = 0;
2020 pVM->hm.s.pFreeGuestPatchMem = 0;
2021 pVM->hm.s.cbGuestPatchMem = 0;
2022 pVM->hm.s.cPatches = 0;
2023 pVM->hm.s.PatchTree = 0;
2024 pVM->hm.s.fTprPatchingActive = false;
2025 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
2026}
2027
2028
2029/**
2030 * Callback to remove TPR patches, restoring the original instructions.
2031 *
2032 * @returns VBox strict status code.
2033 * @param pVM The cross context VM structure.
2034 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2035 * @param pvUser The VCPU ID of the EMT that issued the original patch request.
2036 */
2037static DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
2038{
2039 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2040
2041 /* Only execute the handler on the VCPU on which the original patch request was issued. */
2042 if (pVCpu->idCpu != idCpu)
2043 return VINF_SUCCESS;
2044
2045 Log(("hmR3RemovePatches\n"));
2046 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2047 {
2048 uint8_t abInstr[15];
2049 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2050 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
2051 int rc;
2052
2053#ifdef LOG_ENABLED
2054 char szOutput[256];
2055 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2056 szOutput, sizeof(szOutput), NULL);
2057 if (RT_SUCCESS(rc))
2058 Log(("Patched instr: %s\n", szOutput));
2059#endif
2060
2061 /* Check if the instruction is still the same. */
2062 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
2063 if (rc != VINF_SUCCESS)
2064 {
2065 Log(("Patched code removed? (rc=%Rrc)\n", rc));
2066 continue; /* swapped out or otherwise removed; skip it. */
2067 }
2068
2069 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
2070 {
2071 Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
2072 continue; /* skip it. */
2073 }
2074
2075 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
2076 AssertRC(rc);
2077
2078#ifdef LOG_ENABLED
2079 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2080 szOutput, sizeof(szOutput), NULL);
2081 if (RT_SUCCESS(rc))
2082 Log(("Original instr: %s\n", szOutput));
2083#endif
2084 }
2085 pVM->hm.s.cPatches = 0;
2086 pVM->hm.s.PatchTree = 0;
2087 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
2088 pVM->hm.s.fTprPatchingActive = false;
2089 return VINF_SUCCESS;
2090}
2091
2092
2093/**
2094 * Worker for enabling patching in a VT-x/AMD-V guest.
2095 *
2096 * @returns VBox status code.
2097 * @param pVM The cross context VM structure.
2098 * @param idCpu VCPU to execute hmR3RemovePatches on.
2099 * @param pPatchMem Patch memory range.
2100 * @param cbPatchMem Size of the memory range.
2101 */
2102static DECLCALLBACK(int) hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
2103{
2104 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
2105 AssertRC(rc);
2106
2107 pVM->hm.s.pGuestPatchMem = pPatchMem;
2108 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
2109 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
2110 return VINF_SUCCESS;
2111}
2112
2113
2114/**
2115 * Enable patching in a VT-x/AMD-V guest.
2116 *
2117 * @returns VBox status code.
2118 * @param pVM The cross context VM structure.
2119 * @param pPatchMem Patch memory range.
2120 * @param cbPatchMem Size of the memory range.
2121 */
2122VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2123{
2124 VM_ASSERT_EMT(pVM);
2125 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2126 if (pVM->cCpus > 1)
2127 {
2128 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
2129 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
2130 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2131 AssertRC(rc);
2132 return rc;
2133 }
2134 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2135}
2136
2137
2138/**
2139 * Disable patching in a VT-x/AMD-V guest.
2140 *
2141 * @returns VBox status code.
2142 * @param pVM The cross context VM structure.
2143 * @param pPatchMem Patch memory range.
2144 * @param cbPatchMem Size of the memory range.
2145 */
2146VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2147{
2148 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2149 RT_NOREF2(pPatchMem, cbPatchMem);
2150
2151 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
2152 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
2153
2154 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
2155 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
2156 (void *)(uintptr_t)VMMGetCpuId(pVM));
2157 AssertRC(rc);
2158
2159 pVM->hm.s.pGuestPatchMem = 0;
2160 pVM->hm.s.pFreeGuestPatchMem = 0;
2161 pVM->hm.s.cbGuestPatchMem = 0;
2162 pVM->hm.s.fTprPatchingActive = false;
2163 return VINF_SUCCESS;
2164}
2165
2166
2167/**
2168 * Callback to patch a TPR instruction (vmmcall or mov cr8).
2169 *
2170 * @returns VBox strict status code.
2171 * @param pVM The cross context VM structure.
2172 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2173 * @param pvUser The VCPU ID of the EMT that issued the original patch request.
2174 *
2175 */
2176static DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2177{
2178 /*
2179 * Only execute the handler on the VCPU on which the original patch request was
2180 * issued. (The other CPU(s) might not yet have switched to protected
2181 * mode, nor have the correct memory context.)
2182 */
2183 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2184 if (pVCpu->idCpu != idCpu)
2185 return VINF_SUCCESS;
2186
2187 /*
2188 * We're racing other VCPUs here, so don't try to patch the instruction twice
2189 * and make sure there is still room for our patch record.
2190 */
2191 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2192 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2193 if (pPatch)
2194 {
2195 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
2196 return VINF_SUCCESS;
2197 }
2198 uint32_t const idx = pVM->hm.s.cPatches;
2199 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2200 {
2201 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2202 return VINF_SUCCESS;
2203 }
2204 pPatch = &pVM->hm.s.aPatches[idx];
2205
2206 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2207
2208 /*
2209 * Disassemble the instruction and get cracking.
2210 */
2211 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
2212 DISCPUSTATE Dis;
2213 uint32_t cbOp;
2214 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2215 AssertRC(rc);
2216 if ( rc == VINF_SUCCESS
2217 && Dis.pCurInstr->uOpcode == OP_MOV
2218 && cbOp >= 3)
2219 {
2220 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 }; /* vmmcall */
2221
2222 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2223 AssertRC(rc);
2224
2225 pPatch->cbOp = cbOp;
2226
2227 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2228 {
2229 /* write. */
2230 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2231 {
2232 pPatch->enmType = HMTPRINSTR_WRITE_REG;
2233 pPatch->uSrcOperand = Dis.Param2.Base.idxGenReg;
2234 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", Dis.Param2.Base.idxGenReg));
2235 }
2236 else
2237 {
2238 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2239 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
2240 pPatch->uSrcOperand = Dis.Param2.uValue;
2241 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", Dis.Param2.uValue));
2242 }
2243 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2244 AssertRC(rc);
2245
2246 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2247 pPatch->cbNewOp = sizeof(s_abVMMCall);
2248 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2249 }
2250 else
2251 {
2252 /*
2253 * TPR Read.
2254 *
2255 * Found:
2256 * mov eax, dword [fffe0080] (5 bytes)
2257 * Check if next instruction is:
2258 * shr eax, 4
2259 */
2260 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2261
2262 uint8_t const idxMmioReg = Dis.Param1.Base.idxGenReg;
2263 uint8_t const cbOpMmio = cbOp;
2264 uint64_t const uSavedRip = pCtx->rip;
2265
2266 pCtx->rip += cbOp;
2267 rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2268 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
2269 pCtx->rip = uSavedRip;
2270
2271 if ( rc == VINF_SUCCESS
2272 && Dis.pCurInstr->uOpcode == OP_SHR
2273 && Dis.Param1.fUse == DISUSE_REG_GEN32
2274 && Dis.Param1.Base.idxGenReg == idxMmioReg
2275 && Dis.Param2.fUse == DISUSE_IMMEDIATE8
2276 && Dis.Param2.uValue == 4
2277 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
2278 {
2279 uint8_t abInstr[15];
2280
2281 /* Replacing the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction so as to
2282 access CR8 in 32-bit mode and not cause a #VMEXIT. */
2283 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
2284 AssertRC(rc);
2285
2286 pPatch->cbOp = cbOpMmio + cbOp;
2287
2288 /* 0xf0, 0x0f, 0x20, 0xc0 = mov eax, cr8 */
2289 abInstr[0] = 0xf0;
2290 abInstr[1] = 0x0f;
2291 abInstr[2] = 0x20;
2292 abInstr[3] = 0xc0 | Dis.Param1.Base.idxGenReg;
2293 for (unsigned i = 4; i < pPatch->cbOp; i++)
2294 abInstr[i] = 0x90; /* nop */
2295
2296 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
2297 AssertRC(rc);
2298
2299 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
2300 pPatch->cbNewOp = pPatch->cbOp;
2301 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessCr8);
2302
2303 Log(("Acceptable read/shr candidate!\n"));
2304 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2305 }
2306 else
2307 {
2308 pPatch->enmType = HMTPRINSTR_READ;
2309 pPatch->uDstOperand = idxMmioReg;
2310
2311 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2312 AssertRC(rc);
2313
2314 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2315 pPatch->cbNewOp = sizeof(s_abVMMCall);
2316 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2317 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2318 }
2319 }
2320
2321 pPatch->Core.Key = pCtx->eip;
2322 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2323 AssertRC(rc);
2324
2325 pVM->hm.s.cPatches++;
2326 return VINF_SUCCESS;
2327 }
2328
2329 /*
2330 * Save invalid patch, so we will not try again.
2331 */
2332 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2333 pPatch->Core.Key = pCtx->eip;
2334 pPatch->enmType = HMTPRINSTR_INVALID;
2335 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2336 AssertRC(rc);
2337 pVM->hm.s.cPatches++;
2338 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2339 return VINF_SUCCESS;
2340}
2341
2342
2343/**
2344 * Callback to patch a TPR instruction (jump to generated code).
2345 *
2346 * @returns VBox strict status code.
2347 * @param pVM The cross context VM structure.
2348 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2349 * @param pvUser The VCPU ID of the EMT that issued the original patch request.
2350 *
2351 */
2352static DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2353{
2354 /*
2355 * Only execute the handler on the VCPU on which the original patch request was
2356 * issued. (The other CPU(s) might not yet have switched to protected
2357 * mode, nor have the correct memory context.)
2358 */
2359 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2360 if (pVCpu->idCpu != idCpu)
2361 return VINF_SUCCESS;
2362
2363 /*
2364 * We're racing other VCPUs here, so don't try to patch the instruction twice
2365 * and make sure there is still room for our patch record.
2366 */
2367 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2368 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2369 if (pPatch)
2370 {
2371 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2372 return VINF_SUCCESS;
2373 }
2374 uint32_t const idx = pVM->hm.s.cPatches;
2375 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2376 {
2377 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2378 return VINF_SUCCESS;
2379 }
2380 pPatch = &pVM->hm.s.aPatches[idx];
2381
2382 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2383 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
2384
2385 /*
2386 * Disassemble the instruction and get cracking.
2387 */
2388 DISCPUSTATE Dis;
2389 uint32_t cbOp;
2390 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2391 AssertRC(rc);
2392 if ( rc == VINF_SUCCESS
2393 && Dis.pCurInstr->uOpcode == OP_MOV
2394 && cbOp >= 5)
2395 {
2396 uint8_t aPatch[64];
2397 uint32_t off = 0;
2398
2399 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2400 AssertRC(rc);
2401
2402 pPatch->cbOp = cbOp;
2403 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2404
2405 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2406 {
2407 /*
2408 * TPR write:
2409 *
2410 * push ECX [51]
2411 * push EDX [52]
2412 * push EAX [50]
2413 * xor EDX,EDX [31 D2]
2414 * mov EAX,EAX [89 C0]
2415 * or
2416 * mov EAX,0000000CCh [B8 CC 00 00 00]
2417 * mov ECX,0C0000082h [B9 82 00 00 C0]
2418 * wrmsr [0F 30]
2419 * pop EAX [58]
2420 * pop EDX [5A]
2421 * pop ECX [59]
2422 * jmp return_address [E9 return_address]
2423 */
2424 bool fUsesEax = (Dis.Param2.fUse == DISUSE_REG_GEN32 && Dis.Param2.Base.idxGenReg == DISGREG_EAX);
2425
2426 aPatch[off++] = 0x51; /* push ecx */
2427 aPatch[off++] = 0x52; /* push edx */
2428 if (!fUsesEax)
2429 aPatch[off++] = 0x50; /* push eax */
2430 aPatch[off++] = 0x31; /* xor edx, edx */
2431 aPatch[off++] = 0xd2;
2432 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2433 {
2434 if (!fUsesEax)
2435 {
2436 aPatch[off++] = 0x89; /* mov eax, src_reg */
2437 aPatch[off++] = MAKE_MODRM(3, Dis.Param2.Base.idxGenReg, DISGREG_EAX);
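 /* ModRM: mod=3 (register direct), reg=source, r/m=EAX; opcode 89 /r stores reg into r/m. */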
2438 }
2439 }
2440 else
2441 {
2442 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2443 aPatch[off++] = 0xb8; /* mov eax, immediate */
2444 *(uint32_t *)&aPatch[off] = Dis.Param2.uValue;
2445 off += sizeof(uint32_t);
2446 }
2447 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2448 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2449 off += sizeof(uint32_t);
2450
2451 aPatch[off++] = 0x0f; /* wrmsr */
2452 aPatch[off++] = 0x30;
2453 if (!fUsesEax)
2454 aPatch[off++] = 0x58; /* pop eax */
2455 aPatch[off++] = 0x5a; /* pop edx */
2456 aPatch[off++] = 0x59; /* pop ecx */
2457 }
2458 else
2459 {
2460 /*
2461 * TPR read:
2462 *
2463 * push ECX [51]
2464 * push EDX [52]
2465 * push EAX [50]
2466 * mov ECX,0C0000082h [B9 82 00 00 C0]
2467 * rdmsr [0F 32]
2468 * mov EAX,EAX [89 C0]
2469 * pop EAX [58]
2470 * pop EDX [5A]
2471 * pop ECX [59]
2472 * jmp return_address [E9 return_address]
2473 */
2474 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2475
2476 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2477 aPatch[off++] = 0x51; /* push ecx */
2478 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX)
2479 aPatch[off++] = 0x52; /* push edx */
2480 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2481 aPatch[off++] = 0x50; /* push eax */
2482
2483 aPatch[off++] = 0x31; /* xor edx, edx */
2484 aPatch[off++] = 0xd2;
2485
2486 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2487 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2488 off += sizeof(uint32_t);
2489
2490 aPatch[off++] = 0x0f; /* rdmsr */
2491 aPatch[off++] = 0x32;
2492
2493 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2494 {
2495 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2496 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, Dis.Param1.Base.idxGenReg);
2497 }
2498
2499 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2500 aPatch[off++] = 0x58; /* pop eax */
2501 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX)
2502 aPatch[off++] = 0x5a; /* pop edx */
2503 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2504 aPatch[off++] = 0x59; /* pop ecx */
2505 }
2506 aPatch[off++] = 0xe9; /* jmp return_address */
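 /* rel32 is relative to the end of the 5-byte jmp: target (eip + cbOp) minus the address right after the displacement. */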
2507 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2508 off += sizeof(RTRCUINTPTR);
2509
2510 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2511 {
2512 /* Write new code to the patch buffer. */
2513 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2514 AssertRC(rc);
2515
2516#ifdef LOG_ENABLED
2517 uint32_t cbCurInstr;
2518 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2519 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2520 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2521 {
2522 char szOutput[256];
2523 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2524 szOutput, sizeof(szOutput), &cbCurInstr);
2525 if (RT_SUCCESS(rc))
2526 Log(("Patch instr %s\n", szOutput));
2527 else
2528 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2529 }
2530#endif
2531
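 /* Build the 5-byte jmp rel32 from the original code into the patch buffer: rel32 = patch start - (eip + 5). */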
2532 pPatch->aNewOpcode[0] = 0xE9;
2533 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2534
2535 /* Overwrite the TPR instruction with a jump. */
2536 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2537 AssertRC(rc);
2538
2539 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
2540
2541 pVM->hm.s.pFreeGuestPatchMem += off;
2542 pPatch->cbNewOp = 5;
2543
2544 pPatch->Core.Key = pCtx->eip;
2545 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2546 AssertRC(rc);
2547
2548 pVM->hm.s.cPatches++;
2549 pVM->hm.s.fTprPatchingActive = true;
2550 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2551 return VINF_SUCCESS;
2552 }
2553
2554 Log(("Ran out of space in our patch buffer!\n"));
2555 }
2556 else
2557 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2558
2559
2560 /*
2561 * Save invalid patch, so we will not try again.
2562 */
2563 pPatch = &pVM->hm.s.aPatches[idx];
2564 pPatch->Core.Key = pCtx->eip;
2565 pPatch->enmType = HMTPRINSTR_INVALID;
2566 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2567 AssertRC(rc);
2568 pVM->hm.s.cPatches++;
2569 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2570 return VINF_SUCCESS;
2571}
2572
2573
2574/**
2575 * Attempt to patch TPR MMIO instructions.
2576 *
2577 * @returns VBox status code.
2578 * @param pVM The cross context VM structure.
2579 * @param pVCpu The cross context virtual CPU structure.
2580 */
2581VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
2582{
2583 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2584 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2585 (void *)(uintptr_t)pVCpu->idCpu);
2586 AssertRC(rc);
2587 return rc;
2588}
2589
2590
2591/**
2592 * Checks if we need to reschedule due to VMM device heap changes.
2593 *
2594 * @returns true if a reschedule is required, otherwise false.
2595 * @param pVM The cross context VM structure.
2596 * @param pCtx VM execution context.
2597 */
2598VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx)
2599{
2600 /*
2601 * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
2602 * when the unrestricted guest execution feature is missing (VT-x only).
2603 */
2604 if ( pVM->hm.s.vmx.fEnabled
2605 && !pVM->hm.s.vmx.fUnrestrictedGuestCfg
2606 && CPUMIsGuestInRealModeEx(pCtx)
2607 && !PDMVmmDevHeapIsEnabled(pVM))
2608 return true;
2609
2610 return false;
2611}
2612
2613
2614/**
2615 * Notification callback from DBGF when interrupt breakpoints or generic debug
2616 * event settings change.
2617 *
2618 * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards; this
2619 * function just updates the VM globals.
2620 *
2621 * @param pVM The cross context VM structure.
2622 * @thread EMT(0)
2623 */
2624VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
2625{
2626 /* Interrupts. */
2627 bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
2628 || pVM->dbgf.ro.cHardIntBreakpoints > 0;
2629
2630 /* CPU Exceptions. */
2631 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
2632 !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
2633 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2634 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2635
2636 /* Common VM exits. */
2637 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
2638 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
2639 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2640 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2641
2642 /* Vendor specific VM exits. */
2643 if (HMR3IsVmxEnabled(pVM->pUVM))
2644 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
2645 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
2646 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2647 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2648 else
2649 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
2650 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
2651 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2652 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2653
2654 /* Done. */
2655 pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
2656}
2657
2658
2659/**
2660 * Follow-up notification callback to HMR3NotifyDebugEventChanged for each CPU.
2661 *
2662 * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
2663 * per-CPU settings.
2664 *
2665 * @param pVM The cross context VM structure.
2666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2667 */
2668VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
2669{
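    /* Bitwise OR on purpose: either single-stepping on this VCpu or the VM-wide
       debug-loop decision forces the use of the debug loop. */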
2670 pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
2671}
2672
2673
2674/**
2675 * Checks if we are currently using hardware acceleration.
2676 *
2677 * @returns true if hardware acceleration is being used, otherwise false.
2678 * @param pVCpu The cross context virtual CPU structure.
2679 */
2680VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu)
2681{
2682 return pVCpu->hm.s.fActive;
2683}
2684
2685
2686/**
2687 * External interface for querying whether hardware acceleration is enabled.
2688 *
2689 * @returns true if VT-x or AMD-V is being used, otherwise false.
2690 * @param pUVM The user mode VM handle.
2691 * @sa HMIsEnabled, HMIsEnabledNotMacro.
2692 */
2693VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
2694{
2695 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2696 PVM pVM = pUVM->pVM;
2697 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2698 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
2699}
2700
2701
2702/**
2703 * External interface for querying whether VT-x is being used.
2704 *
2705 * @returns true if VT-x is being used, otherwise false.
2706 * @param pUVM The user mode VM handle.
2707 * @sa HMR3IsSvmEnabled, HMIsEnabled
2708 */
2709VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
2710{
2711 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2712 PVM pVM = pUVM->pVM;
2713 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2714 return pVM->hm.s.vmx.fEnabled
2715 && pVM->hm.s.vmx.fSupported
2716 && pVM->fHMEnabled;
2717}
2718
2719
2720/**
2721 * External interface for querying whether AMD-V is being used.
2722 *
2723 * @returns true if AMD-V is being used, otherwise false.
2724 * @param pUVM The user mode VM handle.
2725 * @sa HMR3IsVmxEnabled, HMIsEnabled
2726 */
2727VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
2728{
2729 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2730 PVM pVM = pUVM->pVM;
2731 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2732 return pVM->hm.s.svm.fEnabled
2733 && pVM->hm.s.svm.fSupported
2734 && pVM->fHMEnabled;
2735}
2736
2737
2738/**
2739 * Checks if we are currently using nested paging.
2740 *
2741 * @returns true if nested paging is being used, otherwise false.
2742 * @param pUVM The user mode VM handle.
2743 */
2744VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
2745{
2746 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2747 PVM pVM = pUVM->pVM;
2748 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2749 return pVM->hm.s.fNestedPagingCfg;
2750}
2751
2752
2753/**
2754 * Checks if virtualized APIC registers are enabled.
2755 *
2756 * When enabled this feature allows the hardware to access most of the
2757 * APIC registers in the virtual-APIC page without causing VM-exits. See
2758 * Intel spec. 29.1.1 "Virtualized APIC Registers".
2759 *
2760 * @returns true if virtualized APIC registers are enabled, otherwise
2761 * false.
2762 * @param pUVM The user mode VM handle.
2763 */
2764VMMR3DECL(bool) HMR3AreVirtApicRegsEnabled(PUVM pUVM)
2765{
2766 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2767 PVM pVM = pUVM->pVM;
2768 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2769 return pVM->hm.s.fVirtApicRegs;
2770}
2771
2772
2773/**
2774 * Checks if APIC posted-interrupt processing is enabled.
2775 *
2776 * This returns whether we can deliver interrupts to the guest without
2777 * leaving guest-context by updating APIC state from host-context.
2778 *
2779 * @returns true if APIC posted-interrupt processing is enabled,
2780 * otherwise false.
2781 * @param pUVM The user mode VM handle.
2782 */
2783VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM)
2784{
2785 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2786 PVM pVM = pUVM->pVM;
2787 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2788 return pVM->hm.s.fPostedIntrs;
2789}
2790
2791
2792/**
2793 * Checks if we are currently using VPID in VT-x mode.
2794 *
2795 * @returns true if VPID is being used, otherwise false.
2796 * @param pUVM The user mode VM handle.
2797 */
2798VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
2799{
2800 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2801 PVM pVM = pUVM->pVM;
2802 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2803 return pVM->hm.s.ForR3.vmx.fVpid;
2804}
2805
2806
2807/**
2808 * Checks if we are currently using VT-x unrestricted execution,
2809 * aka UX.
2810 *
2811 * @returns true if UX is being used, otherwise false.
2812 * @param pUVM The user mode VM handle.
2813 */
2814VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
2815{
2816 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2817 PVM pVM = pUVM->pVM;
2818 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2819 return pVM->hm.s.vmx.fUnrestrictedGuestCfg
2820 || pVM->hm.s.svm.fSupported;
2821}
2822
2823
2824/**
2825 * Checks if the VMX-preemption timer is being used.
2826 *
2827 * @returns true if the VMX-preemption timer is being used, otherwise false.
2828 * @param pVM The cross context VM structure.
2829 */
2830VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2831{
2832 return HMIsEnabled(pVM)
2833 && pVM->hm.s.vmx.fEnabled
2834 && pVM->hm.s.vmx.fUsePreemptTimerCfg;
2835}
2836
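/*
 * A minimal usage sketch (example only, not part of the VMM) of the ring-3
 * query interfaces above, roughly as a frontend might use them. The function
 * name is hypothetical; the HMR3* calls and their signatures are the ones
 * defined in this file.
 */
#if 0
static void exampleLogHmConfig(PUVM pUVM)
{
    if (!HMR3IsEnabled(pUVM))
        LogRel(("Example: HM is not enabled; VT-x/AMD-V is not being used\n"));
    else
    {
        LogRel(("Example: %s enabled\n", HMR3IsVmxEnabled(pUVM) ? "VT-x" : "AMD-V"));
        LogRel(("Example: Nested paging active:   %RTbool\n", HMR3IsNestedPagingActive(pUVM)));
        LogRel(("Example: VPID active:            %RTbool\n", HMR3IsVpidActive(pUVM)));
        LogRel(("Example: Unrestricted execution: %RTbool\n", HMR3IsUXActive(pUVM)));
    }
}
#endif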
2837
2838#ifdef TODO_9217_VMCSINFO
2839/**
2840 * Helper for HMR3CheckError to log VMCS controls to the release log.
2841 *
2842 * @param idCpu The Virtual CPU ID.
2843 * @param pVmcsInfo The VMCS info. object.
2844 */
2845static void hmR3CheckErrorLogVmcsCtls(VMCPUID idCpu, PCVMXVMCSINFO pVmcsInfo)
2846{
2847 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", idCpu, pVmcsInfo->u32PinCtls));
2848 {
2849 uint32_t const u32Val = pVmcsInfo->u32PinCtls;
2850 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
2851 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
2852 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_VIRT_NMI );
2853 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_PREEMPT_TIMER);
2854 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
2855 }
2856 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls));
2857 {
2858 uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
2859 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
2860 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
2861 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_HLT_EXIT );
2862 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INVLPG_EXIT );
2863 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MWAIT_EXIT );
2864 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDPMC_EXIT );
2865 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDTSC_EXIT );
2866 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_LOAD_EXIT );
2867 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_STORE_EXIT );
2868 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TERTIARY_CTLS );
2869 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_LOAD_EXIT );
2870 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_STORE_EXIT );
2871 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TPR_SHADOW );
2872 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_NMI_WINDOW_EXIT );
2873 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MOV_DR_EXIT );
2874 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_UNCOND_IO_EXIT );
2875 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_IO_BITMAPS );
2876 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_TRAP_FLAG );
2877 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_MSR_BITMAPS );
2878 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_EXIT );
2879 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_PAUSE_EXIT );
2880 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2881 }
2882 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls2));
2883 {
2884 uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
2885 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
2886 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
2887 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_DESC_TABLE_EXIT );
2888 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDTSCP );
2889 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_X2APIC_MODE );
2890 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VPID );
2891 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_WBINVD_EXIT );
2892 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_UNRESTRICTED_GUEST );
2893 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_APIC_REG_VIRT );
2894 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_INT_DELIVERY );
2895 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT );
2896 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDRAND_EXIT );
2897 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_INVPCID );
2898 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMFUNC );
2899 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMCS_SHADOWING );
2900 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLS_EXIT );
2901 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDSEED_EXIT );
2902 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PML );
2903 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT_XCPT_VE );
2904 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
2905 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_XSAVES_XRSTORS );
2906 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
2907 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_SPP_EPT );
2908 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PT_EPT );
2909 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
2910 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_USER_WAIT_PAUSE );
2911 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLV_EXIT );
2912 }
2913 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", idCpu, pVmcsInfo->u32EntryCtls));
2914 {
2915 uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
2916 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
2917 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
2918 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_ENTRY_TO_SMM );
2919 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
2920 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PERF_MSR );
2921 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PAT_MSR );
2922 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
2923 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR );
2924 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
2925 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR );
2926 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_CET_STATE );
2927 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PKRS_MSR );
2928 }
2929 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", idCpu, pVmcsInfo->u32ExitCtls));
2930 {
2931 uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
2932 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
2933 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
2934 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PERF_MSR );
2935 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_ACK_EXT_INT );
2936 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PAT_MSR );
2937 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PAT_MSR );
2938 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_EFER_MSR );
2939 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_EFER_MSR );
2940 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER );
2941 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR );
2942 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT );
2943 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR );
2944 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_CET_STATE );
2945 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PKRS_MSR );
2946 }
2947}
2948#endif
2949
2950
2951/**
2952 * Checks a fatal VT-x/AMD-V error and produces a meaningful
2953 * release log message.
2954 *
2955 * @param pVM The cross context VM structure.
2956 * @param iStatusCode VBox status code.
2957 */
2958VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
2959{
2960 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2961 {
2962 /** @todo r=ramshankar: Are all EMTs out of ring-0 at this point!? If not, we
2963 * might be getting inaccurate values for non-guru'ing EMTs. */
2964 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2965#ifdef TODO_9217_VMCSINFO
2966 PCVMXVMCSINFOSHARED pVmcsInfo = hmGetVmxActiveVmcsInfoShared(pVCpu);
2967#endif
2968 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
2969 switch (iStatusCode)
2970 {
2971 case VERR_VMX_INVALID_VMCS_PTR:
2972 {
2973 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
2974 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
2975#ifdef TODO_9217_VMCSINFO
2976 LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", idCpu, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
2977 pVmcsInfo->HCPhysVmcs));
2978#endif
2979 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
2980 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
2981 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
2982 break;
2983 }
2984
2985 case VERR_VMX_UNABLE_TO_START_VM:
2986 {
2987 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
2988 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
2989 LogRel(("HM: CPU[%u] Instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
2990 LogRel(("HM: CPU[%u] Exit reason %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32ExitReason));
2991
2992 if ( pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
2993 || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
2994 {
2995 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
2996 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
2997 }
2998 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
2999 {
3000#ifdef TODO_9217_VMCSINFO
3001 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3002 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
3003 LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrLoad));
3004 LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrStore));
3005 LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysHostMsrLoad));
3006 LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", idCpu, pVmcsInfo->cEntryMsrLoad));
3007 LogRel(("HM: CPU[%u] cExitMsrStore %u\n", idCpu, pVmcsInfo->cExitMsrStore));
3008 LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", idCpu, pVmcsInfo->cExitMsrLoad));
3009#endif
3010 }
3011 /** @todo Log VM-entry event injection control fields
3012 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
3013 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
3014 break;
3015 }
3016
3017 case VERR_VMX_INVALID_GUEST_STATE:
3018 {
3019 LogRel(("HM: VERR_VMX_INVALID_GUEST_STATE:\n"));
3020 LogRel(("HM: CPU[%u] HM error = %#RX32\n", idCpu, pVCpu->hm.s.u32HMError));
3021 LogRel(("HM: CPU[%u] Guest-intr. state = %#RX32\n", idCpu, pVCpu->hm.s.vmx.LastError.u32GuestIntrState));
3022#ifdef TODO_9217_VMCSINFO
3023 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3024#endif
3025 break;
3026 }
3027
3028 /* The guru will dump the HM error and exit history. Nothing extra to report for these errors. */
3029 case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
3030 case VERR_VMX_INVALID_VMXON_PTR:
3031 case VERR_VMX_UNEXPECTED_EXIT:
3032 case VERR_VMX_INVALID_VMCS_FIELD:
3033 case VERR_SVM_UNKNOWN_EXIT:
3034 case VERR_SVM_UNEXPECTED_EXIT:
3035 case VERR_SVM_UNEXPECTED_PATCH_TYPE:
3036 case VERR_SVM_UNEXPECTED_XCPT_EXIT:
3037 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
3038 break;
3039 }
3040 }
3041
3042 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
3043 {
3044 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed1));
3045 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed0));
3046 }
3047 else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
3048 LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError));
3049}
3050
3051
3052/**
3053 * Execute state save operation.
3054 *
3055 * Save only data that cannot be re-loaded while entering HM ring-0 code. This
3056 * is because we always save the VM state from ring-3 and thus most HM state
3057 * will be re-synced dynamically at runtime and doesn't need to be part of the
3058 * VM saved state.
3059 *
3060 * @returns VBox status code.
3061 * @param pVM The cross context VM structure.
3062 * @param pSSM SSM operation handle.
3063 */
3064static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3065{
3066 Log(("hmR3Save:\n"));
3067
3068 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3069 {
3070 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3071 Assert(!pVCpu->hm.s.Event.fPending);
3072 if (pVM->cpum.ro.GuestFeatures.fSvm)
3073 {
3074 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3075 SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
3076 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
3077 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
3078 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
3079 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
3080 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
3081 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
3082 SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
3083 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
3084 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
3085 SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
3086 SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
3087 SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
3088 }
3089 }
3090
3091 /* Save the guest patch data. */
3092 SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3093 SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3094 SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3095
3096 /* Store all the guest patch records too. */
3097 int rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3098 if (RT_FAILURE(rc))
3099 return rc;
3100
3101 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3102 {
3103 AssertCompileSize(HMTPRINSTR, 4);
3104 PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3105 SSMR3PutU32(pSSM, pPatch->Core.Key);
3106 SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3107 SSMR3PutU32(pSSM, pPatch->cbOp);
3108 SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3109 SSMR3PutU32(pSSM, pPatch->cbNewOp);
3110 SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
3111 SSMR3PutU32(pSSM, pPatch->uSrcOperand);
3112 SSMR3PutU32(pSSM, pPatch->uDstOperand);
3113 SSMR3PutU32(pSSM, pPatch->pJumpTarget);
3114 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
3115 if (RT_FAILURE(rc))
3116 return rc;
3117 }
3118
3119 return VINF_SUCCESS;
3120}
3121
3122
3123/**
3124 * Execute state load operation.
3125 *
3126 * @returns VBox status code.
3127 * @param pVM The cross context VM structure.
3128 * @param pSSM SSM operation handle.
3129 * @param uVersion Data layout version.
3130 * @param uPass The data pass.
3131 */
3132static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3133{
3134 int rc;
3135
3136 LogFlowFunc(("uVersion=%u\n", uVersion));
3137 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
3138
3139 /*
3140 * Validate version.
3141 */
3142 if ( uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
3143 && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
3144 && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
3145 && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
3146 {
3147 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
3148 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3149 }
3150
3151 /*
3152 * Load per-VCPU state.
3153 */
3154 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3155 {
3156 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3157 if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
3158 {
3159 /* Load the SVM nested hw.virt state if the VM is configured for it. */
3160 if (pVM->cpum.ro.GuestFeatures.fSvm)
3161 {
3162 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3163 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
3164 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
3165 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
3166 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
3167 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
3168 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
3169 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
3170 SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
3171 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
3172 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
3173 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
3174 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
3175 rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
3176 AssertRCReturn(rc, rc);
3177 }
3178 }
3179 else
3180 {
3181            /* Pending HM event (obsolete for a long time since TRPM holds the info.) */
3182 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.fPending);
3183 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.u32ErrCode);
3184 SSMR3GetU64(pSSM, &pVCpu->hm.s.Event.u64IntInfo);
3185
3186 /* VMX fWasInRealMode related data. */
3187 uint32_t uDummy;
3188 SSMR3GetU32(pSSM, &uDummy);
3189 SSMR3GetU32(pSSM, &uDummy);
3190 rc = SSMR3GetU32(pSSM, &uDummy);
3191 AssertRCReturn(rc, rc);
3192 }
3193 }
3194
3195 /*
3196 * Load TPR patching data.
3197 */
3198 if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
3199 {
3200 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3201 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3202 SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3203
3204 /* Fetch all TPR patch records. */
3205 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3206 AssertRCReturn(rc, rc);
3207 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3208 {
3209 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3210 SSMR3GetU32(pSSM, &pPatch->Core.Key);
3211 SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3212 SSMR3GetU32(pSSM, &pPatch->cbOp);
3213 SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3214 SSMR3GetU32(pSSM, &pPatch->cbNewOp);
3215 SSM_GET_ENUM32_RET(pSSM, pPatch->enmType, HMTPRINSTR);
3216
3217 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
3218 pVM->hm.s.fTprPatchingActive = true;
3219 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTprPatchingActive == false);
3220
3221 SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
3222 SSMR3GetU32(pSSM, &pPatch->uDstOperand);
3223 SSMR3GetU32(pSSM, &pPatch->cFaults);
3224 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
3225 AssertRCReturn(rc, rc);
3226
3227 LogFlow(("hmR3Load: patch %d\n", i));
3228 LogFlow(("Key = %x\n", pPatch->Core.Key));
3229 LogFlow(("cbOp = %d\n", pPatch->cbOp));
3230 LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
3231 LogFlow(("type = %d\n", pPatch->enmType));
3232 LogFlow(("srcop = %d\n", pPatch->uSrcOperand));
3233 LogFlow(("dstop = %d\n", pPatch->uDstOperand));
3234 LogFlow(("cFaults = %d\n", pPatch->cFaults));
3235 LogFlow(("target = %x\n", pPatch->pJumpTarget));
3236
3237 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
3238 AssertRCReturn(rc, rc);
3239 }
3240 }
3241
3242 return VINF_SUCCESS;
3243}
3244
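/*
 * A minimal sketch (example only) of the put/get symmetry SSM requires: every
 * SSMR3PutXxx in hmR3Save above must be matched by an SSMR3GetXxx in hmR3Load
 * at the same position in the stream. The field name below is hypothetical.
 */
#if 0
static int exampleSaveU32(PSSMHANDLE pSSM, uint32_t uHypotheticalField)
{
    return SSMR3PutU32(pSSM, uHypotheticalField);       /* written here... */
}

static int exampleLoadU32(PSSMHANDLE pSSM, uint32_t *puHypotheticalField)
{
    return SSMR3GetU32(pSSM, puHypotheticalField);      /* ...read back at the same point during load. */
}
#endif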
3245
3246/**
3247 * Displays HM info.
3248 *
3249 * @param pVM The cross context VM structure.
3250 * @param pHlp The info helper functions.
3251 * @param pszArgs Arguments, ignored.
3252 */
3253static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3254{
3255 NOREF(pszArgs);
3256 PVMCPU pVCpu = VMMGetCpu(pVM);
3257 if (!pVCpu)
3258 pVCpu = pVM->apCpusR3[0];
3259
3260 if (HMIsEnabled(pVM))
3261 {
3262 if (pVM->hm.s.vmx.fSupported)
3263 pHlp->pfnPrintf(pHlp, "CPU[%u]: VT-x info:\n", pVCpu->idCpu);
3264 else
3265 pHlp->pfnPrintf(pHlp, "CPU[%u]: AMD-V info:\n", pVCpu->idCpu);
3266 pHlp->pfnPrintf(pHlp, " HM error = %#x (%u)\n", pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError);
3267 pHlp->pfnPrintf(pHlp, " rcLastExitToR3 = %Rrc\n", pVCpu->hm.s.rcLastExitToR3);
3268 if (pVM->hm.s.vmx.fSupported)
3269 {
3270 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3271 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3272 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
3273
3274 pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
3275 pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active);
3276 if (fRealOnV86Active)
3277 {
3278 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfoShared->RealMode.Eflags.u32);
3279 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfoShared->RealMode.AttrCS.u);
3280 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfoShared->RealMode.AttrSS.u);
3281 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfoShared->RealMode.AttrDS.u);
3282 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfoShared->RealMode.AttrES.u);
3283 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfoShared->RealMode.AttrFS.u);
3284 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfoShared->RealMode.AttrGS.u);
3285 }
3286 }
3287 }
3288 else
3289 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3290}
3291
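/*
 * A usage sketch (example only): info handlers like hmR3Info above are invoked
 * through DBGF, e.g. from the debugger console. The handler name "hm" is an
 * assumption here; the actual name is whatever this file registers with DBGF.
 */
#if 0
static void exampleInvokeHmInfo(PUVM pUVM)
{
    int rc = DBGFR3Info(pUVM, "hm", NULL /*pszArgs*/, NULL /*pHlp: default log helper*/);
    AssertRC(rc);
}
#endif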
3292
3293/**
3294 * Displays the HM Last-Branch-Record info. for the guest.
3295 *
3296 * @param pVM The cross context VM structure.
3297 * @param pHlp The info helper functions.
3298 * @param pszArgs Arguments, ignored.
3299 */
3300static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3301{
3302 NOREF(pszArgs);
3303 PVMCPU pVCpu = VMMGetCpu(pVM);
3304 if (!pVCpu)
3305 pVCpu = pVM->apCpusR3[0];
3306
3307 if (!HMIsEnabled(pVM))
3308 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3309 else if (HMIsVmxActive(pVM))
3310 {
3311 if (pVM->hm.s.vmx.fLbrCfg)
3312 {
3313 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3314 uint32_t const cLbrStack = pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast - pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst + 1;
3315
3316 /** @todo r=ramshankar: The index technically varies depending on the CPU, but
3317 * 0xf should cover everything we support thus far. Fix if necessary
3318 * later. */
3319 uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
3320            if (idxTopOfStack >= cLbrStack)
3321 {
3322 pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
3323 idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
3324 return;
3325 }
3326
3327 /*
3328 * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
3329 */
3330 pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
3331 uint32_t idxCurrent = idxTopOfStack;
3332 Assert(idxTopOfStack < cLbrStack);
3333 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
3334 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
3335 for (;;)
3336 {
3337 if (pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst)
3338 pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
3339 pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
3340 else
3341 pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
3342
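                /* Step backwards through the ring buffer (wrapping at zero) and
                   stop once we are back at the most recent entry. */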
3343                idxCurrent = (idxCurrent + cLbrStack - 1) % cLbrStack;
3344 if (idxCurrent == idxTopOfStack)
3345 break;
3346 }
3347 }
3348 else
3349 pHlp->pfnPrintf(pHlp, "VM not configured to record LBRs for the guest\n");
3350 }
3351 else
3352 {
3353 Assert(HMIsSvmActive(pVM));
3354 /** @todo SVM: LBRs (get them from VMCB if possible). */
3355 pHlp->pfnPrintf(pHlp, "SVM LBR not implemented.\n");
3356 }
3357}
3358
3359
3360/**
3361 * Displays the HM pending event.
3362 *
3363 * @param pVM The cross context VM structure.
3364 * @param pHlp The info helper functions.
3365 * @param pszArgs Arguments, ignored.
3366 */
3367static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3368{
3369 NOREF(pszArgs);
3370 PVMCPU pVCpu = VMMGetCpu(pVM);
3371 if (!pVCpu)
3372 pVCpu = pVM->apCpusR3[0];
3373
3374 if (HMIsEnabled(pVM))
3375 {
3376 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM event (fPending=%RTbool)\n", pVCpu->idCpu, pVCpu->hm.s.Event.fPending);
3377 if (pVCpu->hm.s.Event.fPending)
3378 {
3379 pHlp->pfnPrintf(pHlp, " u64IntInfo = %#RX64\n", pVCpu->hm.s.Event.u64IntInfo);
3380            pHlp->pfnPrintf(pHlp, "  u32ErrCode        = %#RX32\n", pVCpu->hm.s.Event.u32ErrCode);
3381 pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", pVCpu->hm.s.Event.cbInstr);
3382 pHlp->pfnPrintf(pHlp, " GCPtrFaultAddress = %#RGp\n", pVCpu->hm.s.Event.GCPtrFaultAddress);
3383 }
3384 }
3385 else
3386 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3387}
3388
3389
3390/**
3391 * Displays the SVM nested-guest VMCB cache.
3392 *
3393 * @param pVM The cross context VM structure.
3394 * @param pHlp The info helper functions.
3395 * @param pszArgs Arguments, ignored.
3396 */
3397static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3398{
3399 NOREF(pszArgs);
3400 PVMCPU pVCpu = VMMGetCpu(pVM);
3401 if (!pVCpu)
3402 pVCpu = pVM->apCpusR3[0];
3403
3404 bool const fSvmEnabled = HMR3IsSvmEnabled(pVM->pUVM);
3405 if ( fSvmEnabled
3406 && pVM->cpum.ro.GuestFeatures.fSvm)
3407 {
3408 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3409 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
3410 pHlp->pfnPrintf(pHlp, " fCacheValid = %#RTbool\n", pVmcbNstGstCache->fCacheValid);
3411 pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx);
3412 pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx);
3413 pHlp->pfnPrintf(pHlp, " u16InterceptRdDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdDRx);
3414 pHlp->pfnPrintf(pHlp, " u16InterceptWrDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrDRx);
3415 pHlp->pfnPrintf(pHlp, " u16PauseFilterThreshold = %#RX16\n", pVmcbNstGstCache->u16PauseFilterThreshold);
3416 pHlp->pfnPrintf(pHlp, " u16PauseFilterCount = %#RX16\n", pVmcbNstGstCache->u16PauseFilterCount);
3417 pHlp->pfnPrintf(pHlp, " u32InterceptXcpt = %#RX32\n", pVmcbNstGstCache->u32InterceptXcpt);
3418 pHlp->pfnPrintf(pHlp, " u64InterceptCtrl = %#RX64\n", pVmcbNstGstCache->u64InterceptCtrl);
3419 pHlp->pfnPrintf(pHlp, " u64TSCOffset = %#RX64\n", pVmcbNstGstCache->u64TSCOffset);
3420 pHlp->pfnPrintf(pHlp, " fVIntrMasking = %RTbool\n", pVmcbNstGstCache->fVIntrMasking);
3421 pHlp->pfnPrintf(pHlp, " fNestedPaging = %RTbool\n", pVmcbNstGstCache->fNestedPaging);
3422 pHlp->pfnPrintf(pHlp, " fLbrVirt = %RTbool\n", pVmcbNstGstCache->fLbrVirt);
3423 }
3424 else
3425 {
3426 if (!fSvmEnabled)
3427 pHlp->pfnPrintf(pHlp, "HM SVM is not enabled for this VM!\n");
3428 else
3429 pHlp->pfnPrintf(pHlp, "SVM feature is not exposed to the guest!\n");
3430 }
3431}
3432