VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@87550

Last change: r87550, checked in by vboxsync:

VMM/HMVMX: Copied the fUsePreemptTimer member from HM::vmx to HMR0PERVM::vmx. bugref:9217

1/* $Id: HM.cpp 87550 2021-02-03 09:54:10Z vboxsync $ */
2/** @file
3 * HM - Intel/AMD VM Hardware Support Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_hm HM - Hardware Assisted Virtualization Manager
19 *
20 * The HM manages guest execution using the VT-x and AMD-V CPU hardware
21 * extensions.
22 *
23 * {summary of what HM does}
24 *
25 * The hardware-assisted virtualization manager was originally abbreviated
26 * HWACCM; however, that was cumbersome to write and parse for such a central
27 * component, so it was shortened to HM when the code was refactored in the
28 * 4.3 development cycle.
29 *
30 * {add sections with more details}
31 *
32 * @sa @ref grp_hm
33 */
34
35
36/*********************************************************************************************************************************
37* Header Files *
38*********************************************************************************************************************************/
39#define LOG_GROUP LOG_GROUP_HM
40#define VMCPU_INCL_CPUM_GST_CTX
41#include <VBox/vmm/cpum.h>
42#include <VBox/vmm/stam.h>
43#include <VBox/vmm/mm.h>
44#include <VBox/vmm/em.h>
45#include <VBox/vmm/pdmapi.h>
46#include <VBox/vmm/pgm.h>
47#include <VBox/vmm/ssm.h>
48#include <VBox/vmm/gim.h>
49#include <VBox/vmm/trpm.h>
50#include <VBox/vmm/dbgf.h>
51#include <VBox/vmm/iom.h>
52#include <VBox/vmm/iem.h>
53#include <VBox/vmm/selm.h>
54#include <VBox/vmm/nem.h>
55#include <VBox/vmm/hm_vmx.h>
56#include <VBox/vmm/hm_svm.h>
57#include "HMInternal.h"
58#include <VBox/vmm/vmcc.h>
59#include <VBox/err.h>
60#include <VBox/param.h>
61
62#include <iprt/assert.h>
63#include <VBox/log.h>
64#include <iprt/asm.h>
65#include <iprt/asm-amd64-x86.h>
66#include <iprt/env.h>
67#include <iprt/thread.h>
68
69
70/*********************************************************************************************************************************
71* Defined Constants And Macros *
72*********************************************************************************************************************************/
73/** @def HMVMX_REPORT_FEAT
74 * Reports VT-x feature to the release log.
75 *
76 * @param a_uAllowed1 Mask of allowed-1 feature bits.
77 * @param a_uAllowed0 Mask of allowed-0 feature bits.
78 * @param a_StrDesc The description string to report.
79 * @param a_Featflag Mask of the feature to report.
80 */
81#define HMVMX_REPORT_FEAT(a_uAllowed1, a_uAllowed0, a_StrDesc, a_Featflag) \
82 do { \
83 if ((a_uAllowed1) & (a_Featflag)) \
84 { \
85 if ((a_uAllowed0) & (a_Featflag)) \
86 LogRel(("HM: " a_StrDesc " (must be set)\n")); \
87 else \
88 LogRel(("HM: " a_StrDesc "\n")); \
89 } \
90 else \
91 LogRel(("HM: " a_StrDesc " (must be cleared)\n")); \
92 } while (0)
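/* A minimal usage sketch (the real call sites are hmR3VmxReportPinBasedCtlsMsr
 * and friends further down): given a VMX controls MSR, this logs whether NMI
 * exiting is available, always on ("must be set") or unavailable ("must be
 * cleared"):
 * @code
 *     uint64_t const fAllowed1 = pVmxMsr->n.allowed1;   // features that may be enabled
 *     uint64_t const fAllowed0 = pVmxMsr->n.allowed0;   // features that are fixed to 1
 *     HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
 * @endcode
 */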
93
94/** @def HMVMX_REPORT_ALLOWED_FEAT
95 * Reports an allowed VT-x feature to the release log.
96 *
97 * @param a_uAllowed1 Mask of allowed-1 feature bits.
98 * @param a_StrDesc The description string to report.
99 * @param a_FeatFlag Mask of the feature to report.
100 */
101#define HMVMX_REPORT_ALLOWED_FEAT(a_uAllowed1, a_StrDesc, a_FeatFlag) \
102 do { \
103 if ((a_uAllowed1) & (a_FeatFlag)) \
104 LogRel(("HM: " a_StrDesc "\n")); \
105 else \
106 LogRel(("HM: " a_StrDesc " not supported\n")); \
107 } while (0)
108
109/** @def HMVMX_REPORT_MSR_CAP
110 * Reports MSR feature capability.
111 *
112 * @param a_MsrCaps Mask of MSR feature bits.
113 * @param a_StrDesc The description string to report.
114 * @param a_fCap Mask of the feature to report.
115 */
116#define HMVMX_REPORT_MSR_CAP(a_MsrCaps, a_StrDesc, a_fCap) \
117 do { \
118 if ((a_MsrCaps) & (a_fCap)) \
119 LogRel(("HM: " a_StrDesc "\n")); \
120 } while (0)
121
122/** @def HMVMX_LOGREL_FEAT
123 * Dumps a feature flag from a bitmap of features to the release log.
124 *
125 * @param a_fVal The value of all the features.
126 * @param a_fMask The specific bitmask of the feature.
127 */
128#define HMVMX_LOGREL_FEAT(a_fVal, a_fMask) \
129 do { \
130 if ((a_fVal) & (a_fMask)) \
131 LogRel(("HM: %s\n", #a_fMask)); \
132 } while (0)
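/* Usage sketch (the flag chosen here is illustrative): because a_fMask is
 * stringified with the preprocessor '#' operator, the identifier itself is
 * what lands in the release log when the bit is set:
 * @code
 *     HMVMX_LOGREL_FEAT(fVal, VMX_PIN_CTLS_PREEMPT_TIMER); // logs "HM: VMX_PIN_CTLS_PREEMPT_TIMER"
 * @endcode
 */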
133
134
135/*********************************************************************************************************************************
136* Internal Functions *
137*********************************************************************************************************************************/
138static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
139static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
140static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
141static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
142static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
143static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
144static int hmR3InitFinalizeR3(PVM pVM);
145static int hmR3InitFinalizeR0(PVM pVM);
146static int hmR3InitFinalizeR0Intel(PVM pVM);
147static int hmR3InitFinalizeR0Amd(PVM pVM);
148static int hmR3TermCPU(PVM pVM);
149
150
151#ifdef VBOX_WITH_STATISTICS
152/**
153 * Returns the name of the hardware exception.
154 *
155 * @returns The name of the hardware exception.
156 * @param uVector The exception vector.
157 */
158static const char *hmR3GetXcptName(uint8_t uVector)
159{
160 switch (uVector)
161 {
162 case X86_XCPT_DE: return "#DE";
163 case X86_XCPT_DB: return "#DB";
164 case X86_XCPT_NMI: return "#NMI";
165 case X86_XCPT_BP: return "#BP";
166 case X86_XCPT_OF: return "#OF";
167 case X86_XCPT_BR: return "#BR";
168 case X86_XCPT_UD: return "#UD";
169 case X86_XCPT_NM: return "#NM";
170 case X86_XCPT_DF: return "#DF";
171 case X86_XCPT_CO_SEG_OVERRUN: return "#CO_SEG_OVERRUN";
172 case X86_XCPT_TS: return "#TS";
173 case X86_XCPT_NP: return "#NP";
174 case X86_XCPT_SS: return "#SS";
175 case X86_XCPT_GP: return "#GP";
176 case X86_XCPT_PF: return "#PF";
177 case X86_XCPT_MF: return "#MF";
178 case X86_XCPT_AC: return "#AC";
179 case X86_XCPT_MC: return "#MC";
180 case X86_XCPT_XF: return "#XF";
181 case X86_XCPT_VE: return "#VE";
182 case X86_XCPT_CP: return "#CP";
183 case X86_XCPT_VC: return "#VC";
184 case X86_XCPT_SX: return "#SX";
185 }
186 return "Reserved";
187}
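/* Usage sketch: the injected-exception statistics loop in hmR3InitFinalizeR3
 * below builds counter descriptions from this, e.g. "#PF exception":
 * @code
 *     RTStrPrintf(&aszXcptName[0], sizeof(aszXcptName), "%s exception", hmR3GetXcptName(j));
 * @endcode
 */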
188#endif /* VBOX_WITH_STATISTICS */
189
190
191/**
192 * Initializes the HM.
193 *
194 * This is the very first component to really do init after CFGM so that we can
195 * establish the predominant execution engine for the VM prior to initializing
196 * other modules. It takes care of NEM initialization if needed (HM disabled or
197 * not available in HW).
198 *
199 * If VT-x or AMD-V hardware isn't available, HM will try to fall back on a
200 * native hypervisor API via NEM, and then on raw-mode if that isn't available
201 * either. The fallback to raw-mode will not happen if /HM/HMForced is set
202 * (as it is for guests using SMP or 64-bit, as well as for complicated guests
203 * like OS X, OS/2 and others).
204 *
205 * Note that a lot of the setup work is done in ring-0 and thus postponed till
206 * the ring-3 and ring-0 callbacks to HMR3InitCompleted.
207 *
208 * @returns VBox status code.
209 * @param pVM The cross context VM structure.
210 *
211 * @remarks Be careful with what we call here, since most of the VMM components
212 * are uninitialized.
213 */
214VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
215{
216 LogFlowFunc(("\n"));
217
218 /*
219 * Assert alignment and sizes.
220 */
221 AssertCompileMemberAlignment(VM, hm.s, 32);
222 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
223
224 /*
225 * Register the saved state data unit.
226 */
227 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
228 NULL, NULL, NULL,
229 NULL, hmR3Save, NULL,
230 NULL, hmR3Load, NULL);
231 if (RT_FAILURE(rc))
232 return rc;
233
234 /*
235 * Register info handlers.
236 */
237 rc = DBGFR3InfoRegisterInternalEx(pVM, "hm", "Dumps HM info.", hmR3Info, DBGFINFO_FLAGS_ALL_EMTS);
238 AssertRCReturn(rc, rc);
239
240 rc = DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending,
241 DBGFINFO_FLAGS_ALL_EMTS);
242 AssertRCReturn(rc, rc);
243
244 rc = DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache", "Dumps the HM SVM nested-guest VMCB cache.",
245 hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
246 AssertRCReturn(rc, rc);
247
248 rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the HM LBR info.", hmR3InfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
249 AssertRCReturn(rc, rc);
250
251 /*
252 * Read configuration.
253 */
254 PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
255
256 /*
257 * Validate the HM settings.
258 */
259 rc = CFGMR3ValidateConfig(pCfgHm, "/HM/",
260 "HMForced" /* implied 'true' these days */
261 "|UseNEMInstead"
262 "|FallbackToNEM"
263 "|EnableNestedPaging"
264 "|EnableUX"
265 "|EnableLargePages"
266 "|EnableVPID"
267 "|IBPBOnVMExit"
268 "|IBPBOnVMEntry"
269 "|SpecCtrlByHost"
270 "|L1DFlushOnSched"
271 "|L1DFlushOnVMEntry"
272 "|MDSClearOnSched"
273 "|MDSClearOnVMEntry"
274 "|TPRPatchingEnabled"
275 "|64bitEnabled"
276 "|Exclusive"
277 "|MaxResumeLoops"
278 "|VmxPleGap"
279 "|VmxPleWindow"
280 "|VmxLbr"
281 "|UseVmxPreemptTimer"
282 "|SvmPauseFilter"
283 "|SvmPauseFilterThreshold"
284 "|SvmVirtVmsaveVmload"
285 "|SvmVGif"
286 "|LovelyMesaDrvWorkaround",
287 "" /* pszValidNodes */, "HM" /* pszWho */, 0 /* uInstance */);
288 if (RT_FAILURE(rc))
289 return rc;
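 /* For reference: the keys validated above live under the VM's CFGM root, so
 * they can be tweaked from the command line via extradata, e.g. (sketch; the
 * VM name is hypothetical):
 *     VBoxManage setextradata "MyVM" "VBoxInternal/HM/UseNEMInstead" 1
 * Keys under /HM/ that are not in the list above fail the validation. */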
290
291 /** @cfgm{/HM/HMForced, bool, false}
292 * Forces hardware virtualization, no falling back on raw-mode. HM must be
293 * enabled, i.e. /HMEnabled must be true. */
294 bool fHMForced;
295 AssertRelease(pVM->fHMEnabled);
296 fHMForced = true;
297
298 /** @cfgm{/HM/UseNEMInstead, bool, false}
299 * Don't use HM, use NEM instead. */
300 bool fUseNEMInstead = false;
301 rc = CFGMR3QueryBoolDef(pCfgHm, "UseNEMInstead", &fUseNEMInstead, false);
302 AssertRCReturn(rc, rc);
303 if (fUseNEMInstead && pVM->fHMEnabled)
304 {
305 LogRel(("HM: Setting fHMEnabled to false because fUseNEMInstead is set.\n"));
306 pVM->fHMEnabled = false;
307 }
308
309 /** @cfgm{/HM/FallbackToNEM, bool, true}
310 * Enables fallback on NEM. */
311 bool fFallbackToNEM = true;
312 rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToNEM", &fFallbackToNEM, true);
313 AssertRCReturn(rc, rc);
314
315 /** @cfgm{/HM/EnableNestedPaging, bool, false}
316 * Enables nested paging (aka extended page tables). */
317 bool fAllowNestedPaging = false;
318 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableNestedPaging", &fAllowNestedPaging, false);
319 AssertRCReturn(rc, rc);
320
321 /** @cfgm{/HM/EnableUX, bool, true}
322 * Enables the VT-x unrestricted execution feature. */
323 bool fAllowUnrestricted = true;
324 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableUX", &fAllowUnrestricted, true);
325 AssertRCReturn(rc, rc);
326
327 /** @cfgm{/HM/EnableLargePages, bool, false}
328 * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
329 * page-table walking and possibly a better TLB hit rate in some cases. */
330 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableLargePages", &pVM->hm.s.fLargePages, false);
331 AssertRCReturn(rc, rc);
332
333 /** @cfgm{/HM/EnableVPID, bool, false}
334 * Enables the VT-x VPID feature. */
335 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
336 AssertRCReturn(rc, rc);
337
338 /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
339 * Enables TPR patching for 32-bit Windows guests with an IO-APIC. */
340 rc = CFGMR3QueryBoolDef(pCfgHm, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
341 AssertRCReturn(rc, rc);
342
343 /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
344 * Enables AMD64 cpu features.
345 * On 32-bit hosts this isn't the default and requires host CPU support; 64-bit
346 * hosts already have the support. */
347#ifdef VBOX_WITH_64_BITS_GUESTS
348 rc = CFGMR3QueryBoolDef(pCfgHm, "64bitEnabled", &pVM->hm.s.fAllow64BitGuestsCfg, HC_ARCH_BITS == 64);
349 AssertLogRelRCReturn(rc, rc);
350#else
351 pVM->hm.s.fAllow64BitGuestsCfg = false;
352#endif
353
354 /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
355 * The pause-filter exiting gap in TSC ticks. When the number of ticks between
356 * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
357 * latest PAUSE instruction to be the start of a new PAUSE loop.
358 */
359 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
360 AssertRCReturn(rc, rc);
361
362 /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
363 * The pause-filter exiting window in TSC ticks. When the number of ticks
364 * between the current PAUSE instruction and first PAUSE of a loop exceeds
365 * VmxPleWindow, a VM-exit is triggered.
366 *
367 * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
368 */
369 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
370 AssertRCReturn(rc, rc);
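 /* Worked example (numbers illustrative): with VmxPleGap=128 and
 * VmxPleWindow=4096, two PAUSEs fewer than 128 TSC ticks apart are treated
 * as part of the same spin loop, and once 4096 ticks have elapsed since the
 * first PAUSE of that loop the CPU forces a VM-exit so another vCPU can be
 * scheduled instead of letting this one spin. */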
371
372 /** @cfgm{/HM/VmxLbr, bool, false}
373 * Whether to enable LBR for the guest. This is disabled by default as it's only
374 * useful while debugging and enabling it causes a noticeable performance hit. */
375 rc = CFGMR3QueryBoolDef(pCfgHm, "VmxLbr", &pVM->hm.s.vmx.fLbr, false);
376 AssertRCReturn(rc, rc);
377
378 /** @cfgm{/HM/SvmPauseFilter, uint16_t, 0}
379 * A counter that is decremented each time a PAUSE instruction is executed by
380 * the guest. When the counter is 0, a \#VMEXIT is triggered.
381 *
382 * Setting SvmPauseFilter to 0 disables pause-filter exiting.
383 */
384 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilter", &pVM->hm.s.svm.cPauseFilter, 0);
385 AssertRCReturn(rc, rc);
386
387 /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
388 * The pause filter threshold in ticks. When the elapsed time (in ticks) between
389 * two successive PAUSE instructions exceeds SvmPauseFilterThreshold, the
390 * PauseFilter count is reset to its initial value. However, if PAUSE is
391 * executed PauseFilter times within PauseFilterThreshold ticks, a VM-exit will
392 * be triggered.
393 *
394 * Requires SvmPauseFilter to be non-zero for the pause-filter threshold to be
395 * activated.
396 */
397 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
398 AssertRCReturn(rc, rc);
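 /* Worked example (numbers illustrative): with SvmPauseFilter=3000 and
 * SvmPauseFilterThreshold=1000, every PAUSE arriving within 1000 ticks of
 * the previous one decrements the counter and the 3000th such PAUSE in a
 * row triggers a \#VMEXIT, while a PAUSE arriving 1000 or more ticks after
 * the previous one resets the counter back to 3000. */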
399
400 /** @cfgm{/HM/SvmVirtVmsaveVmload, bool, true}
401 * Whether to make use of virtualized VMSAVE/VMLOAD feature of the CPU if it's
402 * available. */
403 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVirtVmsaveVmload", &pVM->hm.s.svm.fVirtVmsaveVmload, true);
404 AssertRCReturn(rc, rc);
405
406 /** @cfgm{/HM/SvmVGif, bool, true}
407 * Whether to make use of Virtual GIF (Global Interrupt Flag) feature of the CPU
408 * if it's available. */
409 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVGif", &pVM->hm.s.svm.fVGif, true);
410 AssertRCReturn(rc, rc);
411
412 /** @cfgm{/HM/SvmLbrVirt, bool, false}
413 * Whether to make use of the LBR virtualization feature of the CPU if it's
414 * available. This is disabled by default as it's only useful while debugging
415 * and enabling it causes a small hit to performance. */
416 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmLbrVirt", &pVM->hm.s.svm.fLbrVirt, false);
417 AssertRCReturn(rc, rc);
418
419 /** @cfgm{/HM/Exclusive, bool}
420 * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
421 * global init for each host CPU. If false, we do local init each time we wish
422 * to execute guest code.
423 *
424 * On Windows, default is false due to the higher risk of conflicts with other
425 * hypervisors.
426 *
427 * On Mac OS X, this setting is ignored since the code does not handle local
428 * init when it utilizes the OS provided VT-x function, SUPR0EnableVTx().
429 */
430#if defined(RT_OS_DARWIN)
431 pVM->hm.s.fGlobalInit = true;
432#else
433 rc = CFGMR3QueryBoolDef(pCfgHm, "Exclusive", &pVM->hm.s.fGlobalInit,
434# if defined(RT_OS_WINDOWS)
435 false
436# else
437 true
438# endif
439 );
440 AssertLogRelRCReturn(rc, rc);
441#endif
442
443 /** @cfgm{/HM/MaxResumeLoops, uint32_t}
444 * The number of times to resume guest execution before we forcibly return to
445 * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
446 * determines the default value. */
447 rc = CFGMR3QueryU32Def(pCfgHm, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoopsCfg, 0 /* set by R0 later */);
448 AssertLogRelRCReturn(rc, rc);
449
450 /** @cfgm{/HM/UseVmxPreemptTimer, bool}
451 * Whether to make use of the VMX-preemption timer feature of the CPU if it's
452 * available. */
453 rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimerCfg, true);
454 AssertLogRelRCReturn(rc, rc);
455
456 /** @cfgm{/HM/IBPBOnVMExit, bool}
457 * Costly paranoia setting. */
458 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
459 AssertLogRelRCReturn(rc, rc);
460
461 /** @cfgm{/HM/IBPBOnVMEntry, bool}
462 * Costly paranoia setting. */
463 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
464 AssertLogRelRCReturn(rc, rc);
465
466 /** @cfgm{/HM/L1DFlushOnSched, bool, true}
467 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
468 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
469 AssertLogRelRCReturn(rc, rc);
470
471 /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
472 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
473 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
474 AssertLogRelRCReturn(rc, rc);
475
476 /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
477 if (pVM->hm.s.fL1dFlushOnVmEntry)
478 pVM->hm.s.fL1dFlushOnSched = false;
479
480 /** @cfgm{/HM/SpecCtrlByHost, bool}
481 * Another expensive paranoia setting. */
482 rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
483 AssertLogRelRCReturn(rc, rc);
484
485 /** @cfgm{/HM/MDSClearOnSched, bool, true}
486 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
487 * ignored on CPUs that aren't affected. */
488 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
489 AssertLogRelRCReturn(rc, rc);
490
491 /** @cfgm{/HM/MDSClearOnVMEntry, bool, false}
492 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
493 * ignored on CPUs that aren't affected. */
494 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVMEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
495 AssertLogRelRCReturn(rc, rc);
496
497 /* Disable MDSClearOnSched if MDSClearOnVMEntry is enabled. */
498 if (pVM->hm.s.fMdsClearOnVmEntry)
499 pVM->hm.s.fMdsClearOnSched = false;
500
501 /** @cfgm{/HM/LovelyMesaDrvWorkaround,bool}
502 * Workaround for mesa vmsvga 3d driver making incorrect assumptions about
503 * the hypervisor it is running under. */
504 bool f;
505 rc = CFGMR3QueryBoolDef(pCfgHm, "LovelyMesaDrvWorkaround", &f, false);
506 AssertLogRelRCReturn(rc, rc);
507 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
508 {
509 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
510 pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv = f;
511 }
512
513 /*
514 * Check for VT-x or AMD-V support according to the user's wishes.
515 */
516 /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
517 * VERR_SVM_IN_USE. */
518 if (pVM->fHMEnabled)
519 {
520 uint32_t fCaps;
521 rc = SUPR3QueryVTCaps(&fCaps);
522 if (RT_SUCCESS(rc))
523 {
524 if (fCaps & SUPVTCAPS_AMD_V)
525 {
526 pVM->hm.s.svm.fSupported = true;
527 LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
528 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
529 }
530 else if (fCaps & SUPVTCAPS_VT_X)
531 {
532 const char *pszWhy;
533 rc = SUPR3QueryVTxSupported(&pszWhy);
534 if (RT_SUCCESS(rc))
535 {
536 pVM->hm.s.vmx.fSupported = true;
537 LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
538 fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
539 fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST ? " and unrestricted guest execution" : "",
540 (fCaps & (SUPVTCAPS_NESTED_PAGING | SUPVTCAPS_VTX_UNRESTRICTED_GUEST)) ? " hw support" : ""));
541 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
542 }
543 else
544 {
545 /*
546 * Before failing, try fallback to NEM if we're allowed to do that.
547 */
548 pVM->fHMEnabled = false;
549 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
550 if (fFallbackToNEM)
551 {
552 LogRel(("HM: HMR3Init: Attempting fall back to NEM: The host kernel does not support VT-x - %s\n", pszWhy));
553 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
554
555 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
556 if ( RT_SUCCESS(rc2)
557 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
558 rc = VINF_SUCCESS;
559 }
560 if (RT_FAILURE(rc))
561 return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x: %s\n", pszWhy);
562 }
563 }
564 else
565 AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
566 VERR_INTERNAL_ERROR_5);
567
568 /*
569 * Disable nested paging and unrestricted guest execution now if they're
570 * configured so that CPUM can make decisions based on our configuration.
571 */
572 if ( fAllowNestedPaging
573 && (fCaps & SUPVTCAPS_NESTED_PAGING))
574 {
575 pVM->hm.s.fNestedPagingCfg = true;
576 if (fCaps & SUPVTCAPS_VT_X)
577 {
578 if ( fAllowUnrestricted
579 && (fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST))
580 pVM->hm.s.vmx.fUnrestrictedGuestCfg = true;
581 else
582 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
583 }
584 }
585 else
586 Assert(!pVM->hm.s.fNestedPagingCfg);
587 }
588 else
589 {
590 const char *pszMsg;
591 switch (rc)
592 {
593 case VERR_UNSUPPORTED_CPU: pszMsg = "Unknown CPU, VT-x or AMD-V features cannot be ascertained"; break;
594 case VERR_VMX_NO_VMX: pszMsg = "VT-x is not available"; break;
595 case VERR_VMX_MSR_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS"; break;
596 case VERR_VMX_MSR_ALL_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS for all CPU modes"; break;
597 case VERR_VMX_MSR_LOCKING_FAILED: pszMsg = "Failed to enable and lock VT-x features"; break;
598 case VERR_SVM_NO_SVM: pszMsg = "AMD-V is not available"; break;
599 case VERR_SVM_DISABLED: pszMsg = "AMD-V is disabled in the BIOS (or by the host OS)"; break;
600 default:
601 return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
602 }
603
604 /*
605 * Before failing, try fallback to NEM if we're allowed to do that.
606 */
607 pVM->fHMEnabled = false;
608 if (fFallbackToNEM)
609 {
610 LogRel(("HM: HMR3Init: Attempting fall back to NEM: %s\n", pszMsg));
611 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
612 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
613 if ( RT_SUCCESS(rc2)
614 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
615 rc = VINF_SUCCESS;
616 }
617 if (RT_FAILURE(rc))
618 return VM_SET_ERROR(pVM, rc, pszMsg);
619 }
620 }
621 else
622 {
623 /*
624 * Disabled HM means raw-mode, unless NEM is supposed to be used.
625 */
626 if (fUseNEMInstead)
627 {
628 rc = NEMR3Init(pVM, false /*fFallback*/, true /*fForced*/);
629 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
630 if (RT_FAILURE(rc))
631 return rc;
632 }
633 if ( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET
634 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_RAW_MODE
635 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT /* paranoia */)
636 return VM_SET_ERROR(pVM, rc, "Misconfigured VM: No guest execution engine available!");
637 }
638
639 Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
640 Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_RAW_MODE);
641 return VINF_SUCCESS;
642}
643
644
645/**
646 * Initializes HM components after ring-3 phase has been fully initialized.
647 *
648 * @returns VBox status code.
649 * @param pVM The cross context VM structure.
650 */
651static int hmR3InitFinalizeR3(PVM pVM)
652{
653 LogFlowFunc(("\n"));
654
655 if (!HMIsEnabled(pVM))
656 return VINF_SUCCESS;
657
658 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
659 {
660 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
661 pVCpu->hm.s.fActive = false;
662 pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); /* Safe to call now that GIMR3Init() has completed. */
663 }
664
665 /*
666 * Check if L1D flush is needed/possible.
667 */
668 if ( !pVM->cpum.ro.HostFeatures.fFlushCmd
669 || pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
670 || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
671 || pVM->cpum.ro.HostFeatures.fArchVmmNeedNotFlushL1d
672 || pVM->cpum.ro.HostFeatures.fArchRdclNo)
673 pVM->hm.s.fL1dFlushOnSched = pVM->hm.s.fL1dFlushOnVmEntry = false;
674
675 /*
676 * Check if MDS flush is needed/possible.
677 * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
678 */
679 if ( !pVM->cpum.ro.HostFeatures.fMdsClear
680 || pVM->cpum.ro.HostFeatures.fArchMdsNo)
681 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
682 else if ( ( pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
683 && pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Atom_End)
684 || ( pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
685 && pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Phi_End))
686 {
687 if (!pVM->hm.s.fMdsClearOnSched)
688 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
689 pVM->hm.s.fMdsClearOnVmEntry = false;
690 }
691 else if ( pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
692 || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
693 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
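 /* Net effect of the checks above: hosts without MD_CLEAR or with MDS_NO get
 * no MDS mitigation at all; affected Atom and Knights parts are limited to
 * clearing on scheduling (VM-entry clearing is forced off); and CPUs outside
 * the affected Nehalem-and-later Core range have the mitigation disabled
 * too. */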
694
695 /*
696 * Statistics.
697 */
698#ifdef VBOX_WITH_STATISTICS
699 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
700 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
701 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessCr8, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessCR8", STAMUNIT_OCCURENCES, "Number of instruction replacements by MOV CR8.");
702 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessVmc, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessVMC", STAMUNIT_OCCURENCES, "Number of instruction replacements by VMMCALL.");
703 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
704#endif
705
706#ifdef VBOX_WITH_STATISTICS
707 bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
708#endif
709 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
710 {
711 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
712 PHMCPU pHmCpu = &pVCpu->hm.s;
713 int rc;
714
715# define HM_REG_STAT(a_pVar, a_enmType, a_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
716 rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, a_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
717 AssertRC(rc); \
718 } while (0)
719# define HM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
720 HM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
721
722#ifdef VBOX_WITH_STATISTICS
723
724 HM_REG_PROFILE(&pHmCpu->StatPoke, "/PROF/CPU%u/HM/Poke", "Profiling of RTMpPokeCpu.");
725 HM_REG_PROFILE(&pHmCpu->StatSpinPoke, "/PROF/CPU%u/HM/PokeWait", "Profiling of poke wait.");
726 HM_REG_PROFILE(&pHmCpu->StatSpinPokeFailed, "/PROF/CPU%u/HM/PokeWaitFailed", "Profiling of poke wait when RTMpPokeCpu fails.");
727 HM_REG_PROFILE(&pHmCpu->StatEntry, "/PROF/CPU%u/HM/Entry", "Profiling of entry until entering GC.");
728 HM_REG_PROFILE(&pHmCpu->StatPreExit, "/PROF/CPU%u/HM/SwitchFromGC_1", "Profiling of pre-exit processing after returning from GC.");
729 HM_REG_PROFILE(&pHmCpu->StatExitHandling, "/PROF/CPU%u/HM/SwitchFromGC_2", "Profiling of exit handling (longjmps not included!)");
730 HM_REG_PROFILE(&pHmCpu->StatExitIO, "/PROF/CPU%u/HM/SwitchFromGC_2/IO", "I/O.");
731 HM_REG_PROFILE(&pHmCpu->StatExitMovCRx, "/PROF/CPU%u/HM/SwitchFromGC_2/MovCRx", "MOV CRx.");
732 HM_REG_PROFILE(&pHmCpu->StatExitXcptNmi, "/PROF/CPU%u/HM/SwitchFromGC_2/XcptNmi", "Exceptions, NMIs.");
733 HM_REG_PROFILE(&pHmCpu->StatExitVmentry, "/PROF/CPU%u/HM/SwitchFromGC_2/Vmentry", "VMLAUNCH/VMRESUME on Intel or VMRUN on AMD.");
734 HM_REG_PROFILE(&pHmCpu->StatImportGuestState, "/PROF/CPU%u/HM/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
735 HM_REG_PROFILE(&pHmCpu->StatExportGuestState, "/PROF/CPU%u/HM/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
736 HM_REG_PROFILE(&pHmCpu->StatLoadGuestFpuState, "/PROF/CPU%u/HM/LoadGuestFpuState", "Profiling of CPUMR0LoadGuestFPU.");
737 HM_REG_PROFILE(&pHmCpu->StatInGC, "/PROF/CPU%u/HM/InGC", "Profiling of execution of guest-code in hardware.");
738# ifdef HM_PROFILE_EXIT_DISPATCH
739 HM_REG_STAT(&pHmCpu->StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
740 "/PROF/CPU%u/HM/ExitDispatch", "Profiling the dispatching of exit handlers.");
741# endif
742#endif
743# define HM_REG_COUNTER(a, b, desc) HM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
744
745#ifdef VBOX_WITH_STATISTICS
746 HM_REG_COUNTER(&pHmCpu->StatExitAll, "/HM/CPU%u/Exit/All", "Total exits (including nested-guest exits).");
747 HM_REG_COUNTER(&pHmCpu->StatNestedExitAll, "/HM/CPU%u/Exit/NestedGuest/All", "Total nested-guest exits.");
748 HM_REG_COUNTER(&pHmCpu->StatExitShadowNM, "/HM/CPU%u/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
749 HM_REG_COUNTER(&pHmCpu->StatExitGuestNM, "/HM/CPU%u/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
750 HM_REG_COUNTER(&pHmCpu->StatExitShadowPF, "/HM/CPU%u/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
751 HM_REG_COUNTER(&pHmCpu->StatExitShadowPFEM, "/HM/CPU%u/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
752 HM_REG_COUNTER(&pHmCpu->StatExitGuestPF, "/HM/CPU%u/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
753 HM_REG_COUNTER(&pHmCpu->StatExitGuestUD, "/HM/CPU%u/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
754 HM_REG_COUNTER(&pHmCpu->StatExitGuestSS, "/HM/CPU%u/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
755 HM_REG_COUNTER(&pHmCpu->StatExitGuestNP, "/HM/CPU%u/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
756 HM_REG_COUNTER(&pHmCpu->StatExitGuestTS, "/HM/CPU%u/Exit/Trap/Gst/#TS", "Guest #TS (task switch) exception.");
757 HM_REG_COUNTER(&pHmCpu->StatExitGuestOF, "/HM/CPU%u/Exit/Trap/Gst/#OF", "Guest #OF (overflow) exception.");
758 HM_REG_COUNTER(&pHmCpu->StatExitGuestGP, "/HM/CPU%u/Exit/Trap/Gst/#GP", "Guest #GP (general protection) exception.");
759 HM_REG_COUNTER(&pHmCpu->StatExitGuestDE, "/HM/CPU%u/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
760 HM_REG_COUNTER(&pHmCpu->StatExitGuestDF, "/HM/CPU%u/Exit/Trap/Gst/#DF", "Guest #DF (double fault) exception.");
761 HM_REG_COUNTER(&pHmCpu->StatExitGuestBR, "/HM/CPU%u/Exit/Trap/Gst/#BR", "Guest #BR (boundary range exceeded) exception.");
762 HM_REG_COUNTER(&pHmCpu->StatExitGuestAC, "/HM/CPU%u/Exit/Trap/Gst/#AC", "Guest #AC (alignment check) exception.");
763 HM_REG_COUNTER(&pHmCpu->StatExitGuestDB, "/HM/CPU%u/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
764 HM_REG_COUNTER(&pHmCpu->StatExitGuestMF, "/HM/CPU%u/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
765 HM_REG_COUNTER(&pHmCpu->StatExitGuestBP, "/HM/CPU%u/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
766 HM_REG_COUNTER(&pHmCpu->StatExitGuestXF, "/HM/CPU%u/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
767 HM_REG_COUNTER(&pHmCpu->StatExitGuestXcpUnk, "/HM/CPU%u/Exit/Trap/Gst/Other", "Other guest exceptions.");
768 HM_REG_COUNTER(&pHmCpu->StatExitRdmsr, "/HM/CPU%u/Exit/Instr/Rdmsr", "MSR read.");
769 HM_REG_COUNTER(&pHmCpu->StatExitWrmsr, "/HM/CPU%u/Exit/Instr/Wrmsr", "MSR write.");
770 HM_REG_COUNTER(&pHmCpu->StatExitDRxWrite, "/HM/CPU%u/Exit/Instr/DR-Write", "Debug register write.");
771 HM_REG_COUNTER(&pHmCpu->StatExitDRxRead, "/HM/CPU%u/Exit/Instr/DR-Read", "Debug register read.");
772 HM_REG_COUNTER(&pHmCpu->StatExitCR0Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
773 HM_REG_COUNTER(&pHmCpu->StatExitCR2Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
774 HM_REG_COUNTER(&pHmCpu->StatExitCR3Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
775 HM_REG_COUNTER(&pHmCpu->StatExitCR4Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
776 HM_REG_COUNTER(&pHmCpu->StatExitCR8Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
777 HM_REG_COUNTER(&pHmCpu->StatExitCR0Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
778 HM_REG_COUNTER(&pHmCpu->StatExitCR2Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
779 HM_REG_COUNTER(&pHmCpu->StatExitCR3Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
780 HM_REG_COUNTER(&pHmCpu->StatExitCR4Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
781 HM_REG_COUNTER(&pHmCpu->StatExitCR8Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
782 HM_REG_COUNTER(&pHmCpu->StatExitClts, "/HM/CPU%u/Exit/Instr/CLTS", "CLTS instruction.");
783 HM_REG_COUNTER(&pHmCpu->StatExitLmsw, "/HM/CPU%u/Exit/Instr/LMSW", "LMSW instruction.");
784 HM_REG_COUNTER(&pHmCpu->StatExitXdtrAccess, "/HM/CPU%u/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
785 HM_REG_COUNTER(&pHmCpu->StatExitIOWrite, "/HM/CPU%u/Exit/Instr/IO/Write", "I/O write.");
786 HM_REG_COUNTER(&pHmCpu->StatExitIORead, "/HM/CPU%u/Exit/Instr/IO/Read", "I/O read.");
787 HM_REG_COUNTER(&pHmCpu->StatExitIOStringWrite, "/HM/CPU%u/Exit/Instr/IO/WriteString", "String I/O write.");
788 HM_REG_COUNTER(&pHmCpu->StatExitIOStringRead, "/HM/CPU%u/Exit/Instr/IO/ReadString", "String I/O read.");
789 HM_REG_COUNTER(&pHmCpu->StatExitIntWindow, "/HM/CPU%u/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
790 HM_REG_COUNTER(&pHmCpu->StatExitExtInt, "/HM/CPU%u/Exit/ExtInt", "Physical maskable interrupt (host).");
791#endif
792 HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC, "/HM/CPU%u/Exit/HostNmiInGC", "Host NMI received while in guest context.");
793 HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi, "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
794#ifdef VBOX_WITH_STATISTICS
795 HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
796 HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold, "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
797 HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch, "/HM/CPU%u/Exit/TaskSwitch", "Task switch caused through task gate in IDT.");
798 HM_REG_COUNTER(&pHmCpu->StatExitApicAccess, "/HM/CPU%u/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
799
800 HM_REG_COUNTER(&pHmCpu->StatSwitchTprMaskedIrq, "/HM/CPU%u/Switch/TprMaskedIrq", "PDMGetInterrupt() signals TPR masks pending Irq.");
801 HM_REG_COUNTER(&pHmCpu->StatSwitchGuestIrq, "/HM/CPU%u/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
802 HM_REG_COUNTER(&pHmCpu->StatSwitchPendingHostIrq, "/HM/CPU%u/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
803 HM_REG_COUNTER(&pHmCpu->StatSwitchHmToR3FF, "/HM/CPU%u/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
804 HM_REG_COUNTER(&pHmCpu->StatSwitchVmReq, "/HM/CPU%u/Switch/VmReq", "Exit to ring-3 due to pending VM requests.");
805 HM_REG_COUNTER(&pHmCpu->StatSwitchPgmPoolFlush, "/HM/CPU%u/Switch/PgmPoolFlush", "Exit to ring-3 due to pending PGM pool flush.");
806 HM_REG_COUNTER(&pHmCpu->StatSwitchDma, "/HM/CPU%u/Switch/PendingDma", "Exit to ring-3 due to pending DMA requests.");
807 HM_REG_COUNTER(&pHmCpu->StatSwitchExitToR3, "/HM/CPU%u/Switch/ExitToR3", "Exit to ring-3 (total).");
808 HM_REG_COUNTER(&pHmCpu->StatSwitchLongJmpToR3, "/HM/CPU%u/Switch/LongJmpToR3", "Longjump to ring-3.");
809 HM_REG_COUNTER(&pHmCpu->StatSwitchMaxResumeLoops, "/HM/CPU%u/Switch/MaxResumeLoops", "Maximum VMRESUME inner-loop counter reached.");
810 HM_REG_COUNTER(&pHmCpu->StatSwitchHltToR3, "/HM/CPU%u/Switch/HltToR3", "HLT causing us to go to ring-3.");
811 HM_REG_COUNTER(&pHmCpu->StatSwitchApicAccessToR3, "/HM/CPU%u/Switch/ApicAccessToR3", "APIC access causing us to go to ring-3.");
812#endif
813 HM_REG_COUNTER(&pHmCpu->StatSwitchPreempt, "/HM/CPU%u/Switch/Preempting", "EMT has been preempted while in HM context.");
814#ifdef VBOX_WITH_STATISTICS
815 HM_REG_COUNTER(&pHmCpu->StatSwitchNstGstVmexit, "/HM/CPU%u/Switch/NstGstVmexit", "Nested-guest VM-exit occurred.");
816
817 HM_REG_COUNTER(&pHmCpu->StatInjectInterrupt, "/HM/CPU%u/EventInject/Interrupt", "Injected an external interrupt into the guest.");
818 HM_REG_COUNTER(&pHmCpu->StatInjectXcpt, "/HM/CPU%u/EventInject/Trap", "Injected an exception into the guest.");
819 HM_REG_COUNTER(&pHmCpu->StatInjectReflect, "/HM/CPU%u/EventInject/Reflect", "Reflecting an exception caused due to event injection.");
820 HM_REG_COUNTER(&pHmCpu->StatInjectConvertDF, "/HM/CPU%u/EventInject/ReflectDF", "Injected a converted #DF caused due to event injection.");
821 HM_REG_COUNTER(&pHmCpu->StatInjectInterpret, "/HM/CPU%u/EventInject/Interpret", "Falling back to interpreter for handling exception caused due to event injection.");
822 HM_REG_COUNTER(&pHmCpu->StatInjectReflectNPF, "/HM/CPU%u/EventInject/ReflectNPF", "Reflecting event that caused an EPT violation / nested #PF.");
823
824 HM_REG_COUNTER(&pHmCpu->StatFlushPage, "/HM/CPU%u/Flush/Page", "Invalidating a guest page on all guest CPUs.");
825 HM_REG_COUNTER(&pHmCpu->StatFlushPageManual, "/HM/CPU%u/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
826 HM_REG_COUNTER(&pHmCpu->StatFlushPhysPageManual, "/HM/CPU%u/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
827 HM_REG_COUNTER(&pHmCpu->StatFlushTlb, "/HM/CPU%u/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
828 HM_REG_COUNTER(&pHmCpu->StatFlushTlbManual, "/HM/CPU%u/Flush/TLB/Manual", "Request a full guest-TLB flush.");
829 HM_REG_COUNTER(&pHmCpu->StatFlushTlbNstGst, "/HM/CPU%u/Flush/TLB/NestedGuest", "Request a nested-guest-TLB flush.");
830 HM_REG_COUNTER(&pHmCpu->StatFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
831 HM_REG_COUNTER(&pHmCpu->StatNoFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/Skipped", "No TLB flushing required.");
832 HM_REG_COUNTER(&pHmCpu->StatFlushEntire, "/HM/CPU%u/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
833 HM_REG_COUNTER(&pHmCpu->StatFlushAsid, "/HM/CPU%u/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
834 HM_REG_COUNTER(&pHmCpu->StatFlushNestedPaging, "/HM/CPU%u/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
835 HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgVirt, "/HM/CPU%u/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
836 HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgPhys, "/HM/CPU%u/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
837 HM_REG_COUNTER(&pHmCpu->StatTlbShootdown, "/HM/CPU%u/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
838 HM_REG_COUNTER(&pHmCpu->StatTlbShootdownFlush, "/HM/CPU%u/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
839
840 HM_REG_COUNTER(&pHmCpu->StatTscParavirt, "/HM/CPU%u/TSC/Paravirt", "Paravirtualized TSC in effect.");
841 HM_REG_COUNTER(&pHmCpu->StatTscOffset, "/HM/CPU%u/TSC/Offset", "TSC offsetting is in effect.");
842 HM_REG_COUNTER(&pHmCpu->StatTscIntercept, "/HM/CPU%u/TSC/Intercept", "Intercept TSC accesses.");
843
844 HM_REG_COUNTER(&pHmCpu->StatDRxArmed, "/HM/CPU%u/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
845 HM_REG_COUNTER(&pHmCpu->StatDRxContextSwitch, "/HM/CPU%u/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
846 HM_REG_COUNTER(&pHmCpu->StatDRxIoCheck, "/HM/CPU%u/Debug/IOCheck", "Checking for I/O breakpoint.");
847
848 HM_REG_COUNTER(&pHmCpu->StatExportMinimal, "/HM/CPU%u/Export/Minimal", "VM-entry exporting minimal guest-state.");
849 HM_REG_COUNTER(&pHmCpu->StatExportFull, "/HM/CPU%u/Export/Full", "VM-entry exporting the full guest-state.");
850 HM_REG_COUNTER(&pHmCpu->StatLoadGuestFpu, "/HM/CPU%u/Export/GuestFpu", "VM-entry loading the guest-FPU state.");
851 HM_REG_COUNTER(&pHmCpu->StatExportHostState, "/HM/CPU%u/Export/HostState", "VM-entry exporting host-state.");
852
853 if (fCpuSupportsVmx)
854 {
855 HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRip, "/HM/CPU%u/WriteHostRIP", "Number of VMX_VMCS_HOST_RIP instructions.");
856 HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRsp, "/HM/CPU%u/WriteHostRSP", "Number of VMX_VMCS_HOST_RSP instructions.");
857 HM_REG_COUNTER(&pHmCpu->StatVmxVmLaunch, "/HM/CPU%u/VMLaunch", "Number of VMLAUNCH instructions.");
858 HM_REG_COUNTER(&pHmCpu->StatVmxVmResume, "/HM/CPU%u/VMResume", "Number of VMRESUME instructions.");
859 }
860
861 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelBase, "/HM/CPU%u/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
862 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelLimit, "/HM/CPU%u/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
863 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelAttr, "/HM/CPU%u/VMXCheck/RMSelAttrs", "Could not use VMX due to unsuitable real-mode selector attributes.");
864
865 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelBase, "/HM/CPU%u/VMXCheck/V86SelBase", "Could not use VMX due to unsuitable v8086-mode selector base.");
866 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelLimit, "/HM/CPU%u/VMXCheck/V86SelLimit", "Could not use VMX due to unsuitable v8086-mode selector limit.");
867 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelAttr, "/HM/CPU%u/VMXCheck/V86SelAttrs", "Could not use VMX due to unsuitable v8086-mode selector attributes.");
868
869 HM_REG_COUNTER(&pHmCpu->StatVmxCheckRmOk, "/HM/CPU%u/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
870 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadSel, "/HM/CPU%u/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
871 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl, "/HM/CPU%u/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
872 HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk, "/HM/CPU%u/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
873
874 /*
875 * Guest Exit reason stats.
876 */
877 pHmCpu->paStatExitReason = NULL;
878 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT * sizeof(*pHmCpu->paStatExitReason), 0 /* uAlignment */, MM_TAG_HM,
879 (void **)&pHmCpu->paStatExitReason);
880 AssertRCReturn(rc, rc);
881
882 if (fCpuSupportsVmx)
883 {
884 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
885 {
886 const char *pszExitName = HMGetVmxExitName(j);
887 if (pszExitName)
888 {
889 rc = STAMR3RegisterF(pVM, &pHmCpu->paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
890 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
891 AssertRCReturn(rc, rc);
892 }
893 }
894 }
895 else
896 {
897 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
898 {
899 const char *pszExitName = HMGetSvmExitName(j);
900 if (pszExitName)
901 {
902 rc = STAMR3RegisterF(pVM, &pHmCpu->paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
903 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
904 AssertRC(rc);
905 }
906 }
907 }
908 HM_REG_COUNTER(&pHmCpu->StatExitReasonNpf, "/HM/CPU%u/Exit/Reason/#NPF", "Nested page faults");
909
910 pHmCpu->paStatExitReasonR0 = MMHyperR3ToR0(pVM, pHmCpu->paStatExitReason);
911 Assert(pHmCpu->paStatExitReasonR0 != NIL_RTR0PTR);
912
913#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
914 /*
915 * Nested-guest VM-exit reason stats.
916 */
917 pHmCpu->paStatNestedExitReason = NULL;
918 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT * sizeof(*pHmCpu->paStatNestedExitReason), 0 /* uAlignment */, MM_TAG_HM,
919 (void **)&pHmCpu->paStatNestedExitReason);
920 AssertRCReturn(rc, rc);
921 if (fCpuSupportsVmx)
922 {
923 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
924 {
925 const char *pszExitName = HMGetVmxExitName(j);
926 if (pszExitName)
927 {
928 rc = STAMR3RegisterF(pVM, &pHmCpu->paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
929 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
930 AssertRC(rc);
931 }
932 }
933 }
934 else
935 {
936 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
937 {
938 const char *pszExitName = HMGetSvmExitName(j);
939 if (pszExitName)
940 {
941 rc = STAMR3RegisterF(pVM, &pHmCpu->paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
942 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
943 AssertRC(rc);
944 }
945 }
946 }
947 HM_REG_COUNTER(&pHmCpu->StatNestedExitReasonNpf, "/HM/CPU%u/Exit/NestedGuest/Reason/#NPF", "Nested page faults");
948 pHmCpu->paStatNestedExitReasonR0 = MMHyperR3ToR0(pVM, pHmCpu->paStatNestedExitReason);
949 Assert(pHmCpu->paStatNestedExitReasonR0 != NIL_RTR0PTR);
950#endif
951
952 /*
953 * Injected interrupts stats.
954 */
955 {
956 uint32_t const cInterrupts = 0xff + 1;
957 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * cInterrupts, 8, MM_TAG_HM, (void **)&pHmCpu->paStatInjectedIrqs);
958 AssertRCReturn(rc, rc);
959 pHmCpu->paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pHmCpu->paStatInjectedIrqs);
960 Assert(pHmCpu->paStatInjectedIrqsR0 != NIL_RTR0PTR);
961 for (unsigned j = 0; j < cInterrupts; j++)
962 {
963 char aszIntrName[64];
964 RTStrPrintf(&aszIntrName[0], sizeof(aszIntrName), "Interrupt %u", j);
965 rc = STAMR3RegisterF(pVM, &pHmCpu->paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
966 STAMUNIT_OCCURENCES, aszIntrName,
967 "/HM/CPU%u/EventInject/InjectIntr/%02X", idCpu, j);
968 AssertRC(rc);
969 }
970 }
971
972 /*
973 * Injected exception stats.
974 */
975 {
976 uint32_t const cXcpts = X86_XCPT_LAST + 1;
977 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * cXcpts, 8, MM_TAG_HM, (void **)&pHmCpu->paStatInjectedXcpts);
978 AssertRCReturn(rc, rc);
979 pHmCpu->paStatInjectedXcptsR0 = MMHyperR3ToR0(pVM, pHmCpu->paStatInjectedXcpts);
980 Assert(pHmCpu->paStatInjectedXcptsR0 != NIL_RTR0PTR);
981 for (unsigned j = 0; j < cXcpts; j++)
982 {
983 char aszXcptName[64];
984 RTStrPrintf(&aszXcptName[0], sizeof(aszXcptName), "%s exception", hmR3GetXcptName(j));
985 rc = STAMR3RegisterF(pVM, &pHmCpu->paStatInjectedXcpts[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
986 STAMUNIT_OCCURENCES, aszXcptName,
987 "/HM/CPU%u/EventInject/InjectXcpt/%02X", idCpu, j);
988 AssertRC(rc);
989 }
990 }
991
992#endif /* VBOX_WITH_STATISTICS */
993#undef HM_REG_COUNTER
994#undef HM_REG_PROFILE
995#undef HM_REG_STAT
996 }
997
998 return VINF_SUCCESS;
999}
1000
1001
1002/**
1003 * Called when an init phase has completed.
1004 *
1005 * @returns VBox status code.
1006 * @param pVM The cross context VM structure.
1007 * @param enmWhat The phase that completed.
1008 */
1009VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1010{
1011 switch (enmWhat)
1012 {
1013 case VMINITCOMPLETED_RING3:
1014 return hmR3InitFinalizeR3(pVM);
1015 case VMINITCOMPLETED_RING0:
1016 return hmR3InitFinalizeR0(pVM);
1017 default:
1018 return VINF_SUCCESS;
1019 }
1020}
1021
1022
1023/**
1024 * Turns off normal raw mode features.
1025 *
1026 * @param pVM The cross context VM structure.
1027 */
1028static void hmR3DisableRawMode(PVM pVM)
1029{
1030/** @todo r=bird: HM shouldn't be doing this crap. */
1031 /* Reinit the paging mode to force the new shadow mode. */
1032 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1033 {
1034 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1035 PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
1036 }
1037}
1038
1039
1040/**
1041 * Initialize VT-x or AMD-V.
1042 *
1043 * @returns VBox status code.
1044 * @param pVM The cross context VM structure.
1045 */
1046static int hmR3InitFinalizeR0(PVM pVM)
1047{
1048 int rc;
1049
1050 if (!HMIsEnabled(pVM))
1051 return VINF_SUCCESS;
1052
1053 /*
1054 * Hack to allow users to work around broken BIOSes that incorrectly set
1055 * EFER.SVME, which makes us believe somebody else is already using AMD-V.
1056 */
1057 if ( !pVM->hm.s.vmx.fSupported
1058 && !pVM->hm.s.svm.fSupported
1059 && pVM->hm.s.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
1060 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
1061 {
1062 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
1063 pVM->hm.s.svm.fSupported = true;
1064 pVM->hm.s.svm.fIgnoreInUseError = true;
1065 pVM->hm.s.rcInit = VINF_SUCCESS;
1066 }
1067
1068 /*
1069 * Report ring-0 init errors.
1070 */
1071 if ( !pVM->hm.s.vmx.fSupported
1072 && !pVM->hm.s.svm.fSupported)
1073 {
1074 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit));
1075 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl));
1076 switch (pVM->hm.s.rcInit)
1077 {
1078 case VERR_VMX_IN_VMX_ROOT_MODE:
1079 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor");
1080 case VERR_VMX_NO_VMX:
1081 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available");
1082 case VERR_VMX_MSR_VMX_DISABLED:
1083 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_DISABLED, "VT-x is disabled in the BIOS");
1084 case VERR_VMX_MSR_ALL_VMX_DISABLED:
1085 return VM_SET_ERROR(pVM, VERR_VMX_MSR_ALL_VMX_DISABLED, "VT-x is disabled in the BIOS for all CPU modes");
1086 case VERR_VMX_MSR_LOCKING_FAILED:
1087 return VM_SET_ERROR(pVM, VERR_VMX_MSR_LOCKING_FAILED, "Failed to lock VT-x features while trying to enable VT-x");
1088 case VERR_VMX_MSR_VMX_ENABLE_FAILED:
1089 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_ENABLE_FAILED, "Failed to enable VT-x features");
1090 case VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED:
1091 return VM_SET_ERROR(pVM, VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED, "Failed to enable VT-x features in SMX mode");
1092
1093 case VERR_SVM_IN_USE:
1094 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor");
1095 case VERR_SVM_NO_SVM:
1096 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available");
1097 case VERR_SVM_DISABLED:
1098 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
1099 }
1100 return VMSetError(pVM, pVM->hm.s.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.rcInit);
1101 }
1102
1103 /*
1104 * Enable VT-x or AMD-V on all host CPUs.
1105 */
1106 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
1107 if (RT_FAILURE(rc))
1108 {
1109 LogRel(("HM: Failed to enable, error %Rrc\n", rc));
1110 HMR3CheckError(pVM, rc);
1111 return rc;
1112 }
1113
1114 /*
1115 * No TPR patching is required when the IO-APIC is not enabled for this VM.
1116 * (Main should have taken care of this already)
1117 */
1118 if (!PDMHasIoApic(pVM))
1119 {
1120 Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
1121 pVM->hm.s.fTprPatchingAllowed = false;
1122 }
1123
1124 LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
1125 pVM->hm.s.fWorldSwitcherForLog, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
1126 pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
1127
1128 /*
1129 * Do the vendor-specific initialization.
1130 *
1131 * Note! We enable release log buffering here since we're doing a relatively
1132 * large amount of logging and don't want to hit the disk with each LogRel
1133 * statement.
1134 */
1135 AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
1136 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
1137 if (pVM->hm.s.vmx.fSupported)
1138 rc = hmR3InitFinalizeR0Intel(pVM);
1139 else
1140 rc = hmR3InitFinalizeR0Amd(pVM);
1141 LogRel((pVM->hm.s.fGlobalInit ? "HM: VT-x/AMD-V init method: Global\n"
1142 : "HM: VT-x/AMD-V init method: Local\n"));
1143 RTLogRelSetBuffering(fOldBuffered);
1144 pVM->hm.s.fInitialized = true;
1145
1146 return rc;
1147}
1148
1149
1150/**
1151 * @callback_method_impl{FNPDMVMMDEVHEAPNOTIFY}
1152 */
1153static DECLCALLBACK(void) hmR3VmmDevHeapNotify(PVM pVM, void *pvAllocation, RTGCPHYS GCPhysAllocation)
1154{
1155 NOREF(pVM);
1156 NOREF(pvAllocation);
1157 NOREF(GCPhysAllocation);
1158}
1159
1160
1161/**
1162 * Returns a description of the VMCS (and associated regions') memory type given the
1163 * IA32_VMX_BASIC MSR.
1164 *
1165 * @returns The descriptive memory type.
1166 * @param uMsrVmxBasic IA32_VMX_BASIC MSR value.
1167 */
1168static const char *hmR3VmxGetMemTypeDesc(uint64_t uMsrVmxBasic)
1169{
1170 uint8_t const uMemType = RT_BF_GET(uMsrVmxBasic, VMX_BF_BASIC_VMCS_MEM_TYPE);
1171 switch (uMemType)
1172 {
1173 case VMX_BASIC_MEM_TYPE_WB: return "Write Back (WB)";
1174 case VMX_BASIC_MEM_TYPE_UC: return "Uncacheable (UC)";
1175 }
1176 return "Unknown";
1177}
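/* For reference: this field sits in bits 53:50 of IA32_VMX_BASIC; an encoding
 * of 6 selects write-back and 0 uncacheable, which is why only the two cases
 * above are recognized. */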
1178
1179
1180/**
1181 * Returns a single-line description of all the activity-states supported by the CPU
1182 * given the IA32_VMX_MISC MSR.
1183 *
1184 * @returns All supported activity states.
1185 * @param uMsrMisc IA32_VMX_MISC MSR value.
1186 */
1187static const char *hmR3VmxGetActivityStateAllDesc(uint64_t uMsrMisc)
1188{
1189 static const char * const s_apszActStates[] =
1190 {
1191 "",
1192 " ( HLT )",
1193 " ( SHUTDOWN )",
1194 " ( HLT SHUTDOWN )",
1195 " ( SIPI_WAIT )",
1196 " ( HLT SIPI_WAIT )",
1197 " ( SHUTDOWN SIPI_WAIT )",
1198 " ( HLT SHUTDOWN SIPI_WAIT )"
1199 };
1200 uint8_t const idxActStates = RT_BF_GET(uMsrMisc, VMX_BF_MISC_ACTIVITY_STATES);
1201 Assert(idxActStates < RT_ELEMENTS(s_apszActStates));
1202 return s_apszActStates[idxActStates];
1203}
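/* Worked example: VMX_BF_MISC_ACTIVITY_STATES extracts a 3-bit field in which
 * bit 0 = HLT, bit 1 = SHUTDOWN and bit 2 = SIPI_WAIT, so a raw value of 5
 * (binary 101) indexes entry 5 above and yields " ( HLT SIPI_WAIT )". */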
1204
1205
1206/**
1207 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
1208 *
1209 * @param fFeatMsr The feature control MSR value.
1210 */
1211static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
1212{
1213 uint64_t const val = fFeatMsr;
1214 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val));
1215 HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
1216 HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
1217 HMVMX_REPORT_MSR_CAP(val, "VMXON", MSR_IA32_FEATURE_CONTROL_VMXON);
1218 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN0", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_0);
1219 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN1", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_1);
1220 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN2", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_2);
1221 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN3", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_3);
1222 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN4", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_4);
1223 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN5", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_5);
1224 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN6", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_6);
1225 HMVMX_REPORT_MSR_CAP(val, "SENTER_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SENTER_GLOBAL_EN);
1226 HMVMX_REPORT_MSR_CAP(val, "SGX_LAUNCH_EN", MSR_IA32_FEATURE_CONTROL_SGX_LAUNCH_EN);
1227 HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
1228 HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE);
1229 if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
1230 LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
1231}
1232
1233
1234/**
1235 * Reports MSR_IA32_VMX_BASIC MSR to the log.
1236 *
1237 * @param uBasicMsr The VMX basic MSR value.
1238 */
1239static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
1240{
1241 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr));
1242 LogRel(("HM: VMCS id = %#x\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_ID)));
1243 LogRel(("HM: VMCS size = %u bytes\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_SIZE)));
1244 LogRel(("HM: VMCS physical address limit = %s\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_PHYSADDR_WIDTH) ?
1245 "< 4 GB" : "None"));
1246 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr)));
1247 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_DUAL_MON)));
1248 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_INS_OUTS)));
1249 LogRel(("HM: Supports true-capability MSRs = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_TRUE_CTLS)));
1250 LogRel(("HM: VM-entry Xcpt error-code optional = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_XCPT_ERRCODE)));
1251}
1252
1253
1254/**
1255 * Reports MSR_IA32_PINBASED_CTLS to the log.
1256 *
1257 * @param pVmxMsr Pointer to the VMX MSR.
1258 */
1259static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1260{
1261 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1262 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1263 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
1264 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
1265 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
1266 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRTUAL_NMI", VMX_PIN_CTLS_VIRT_NMI);
1267 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PREEMPT_TIMER", VMX_PIN_CTLS_PREEMPT_TIMER);
1268 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "POSTED_INT", VMX_PIN_CTLS_POSTED_INT);
1269}
1270
1271
1272/**
1273 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
1274 *
1275 * @param pVmxMsr Pointer to the VMX MSR.
1276 */
1277static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1278{
1279 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1280 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1281 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
1282 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INT_WINDOW_EXIT", VMX_PROC_CTLS_INT_WINDOW_EXIT);
1283 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TSC_OFFSETTING", VMX_PROC_CTLS_USE_TSC_OFFSETTING);
1284 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HLT_EXIT", VMX_PROC_CTLS_HLT_EXIT);
1285 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVLPG_EXIT", VMX_PROC_CTLS_INVLPG_EXIT);
1286 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MWAIT_EXIT", VMX_PROC_CTLS_MWAIT_EXIT);
1287 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDPMC_EXIT", VMX_PROC_CTLS_RDPMC_EXIT);
1288 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSC_EXIT", VMX_PROC_CTLS_RDTSC_EXIT);
1289 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_LOAD_EXIT", VMX_PROC_CTLS_CR3_LOAD_EXIT);
1290 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_STORE_EXIT", VMX_PROC_CTLS_CR3_STORE_EXIT);
1291 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_LOAD_EXIT", VMX_PROC_CTLS_CR8_LOAD_EXIT);
1292 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_STORE_EXIT", VMX_PROC_CTLS_CR8_STORE_EXIT);
1293 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TPR_SHADOW", VMX_PROC_CTLS_USE_TPR_SHADOW);
1294 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_WINDOW_EXIT", VMX_PROC_CTLS_NMI_WINDOW_EXIT);
1295 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MOV_DR_EXIT", VMX_PROC_CTLS_MOV_DR_EXIT);
1296 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNCOND_IO_EXIT", VMX_PROC_CTLS_UNCOND_IO_EXIT);
1297 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_IO_BITMAPS", VMX_PROC_CTLS_USE_IO_BITMAPS);
1298 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_TRAP_FLAG", VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
1299 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_MSR_BITMAPS", VMX_PROC_CTLS_USE_MSR_BITMAPS);
1300 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_EXIT", VMX_PROC_CTLS_MONITOR_EXIT);
1301 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_EXIT", VMX_PROC_CTLS_PAUSE_EXIT);
1302 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_SECONDARY_CTLS", VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1303}
1304
1305
1306/**
1307 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
1308 *
1309 * @param pVmxMsr Pointer to the VMX MSR.
1310 */
1311static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
1312{
1313 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1314 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1315 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
1316 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_APIC_ACCESS", VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
1317 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT", VMX_PROC_CTLS2_EPT);
1318 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DESC_TABLE_EXIT", VMX_PROC_CTLS2_DESC_TABLE_EXIT);
1319 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSCP", VMX_PROC_CTLS2_RDTSCP);
1320 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_X2APIC_MODE", VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
1321 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VPID", VMX_PROC_CTLS2_VPID);
1322 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "WBINVD_EXIT", VMX_PROC_CTLS2_WBINVD_EXIT);
1323 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNRESTRICTED_GUEST", VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1324 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "APIC_REG_VIRT", VMX_PROC_CTLS2_APIC_REG_VIRT);
1325 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_INT_DELIVERY", VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
1326 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_LOOP_EXIT", VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
1327 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDRAND_EXIT", VMX_PROC_CTLS2_RDRAND_EXIT);
1328 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVPCID", VMX_PROC_CTLS2_INVPCID);
1329 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMFUNC", VMX_PROC_CTLS2_VMFUNC);
1330 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMCS_SHADOWING", VMX_PROC_CTLS2_VMCS_SHADOWING);
1331 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLS_EXIT", VMX_PROC_CTLS2_ENCLS_EXIT);
1332 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDSEED_EXIT", VMX_PROC_CTLS2_RDSEED_EXIT);
1333 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PML", VMX_PROC_CTLS2_PML);
1334 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT_VE", VMX_PROC_CTLS2_EPT_VE);
1335 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
1336 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "XSAVES_XRSTORS", VMX_PROC_CTLS2_XSAVES_XRSTORS);
1337 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MODE_BASED_EPT_PERM", VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
1338 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SPPTP_EPT", VMX_PROC_CTLS2_SPPTP_EPT);
1339 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PT_EPT", VMX_PROC_CTLS2_PT_EPT);
1340 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "TSC_SCALING", VMX_PROC_CTLS2_TSC_SCALING);
1341 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USER_WAIT_PAUSE", VMX_PROC_CTLS2_USER_WAIT_PAUSE);
1342 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLV_EXIT", VMX_PROC_CTLS2_ENCLV_EXIT);
1343}
1344
1345
1346/**
1347 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
1348 *
1349 * @param pVmxMsr Pointer to the VMX MSR.
1350 */
1351static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1352{
1353 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1354 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1355 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u));
1356 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_DEBUG", VMX_ENTRY_CTLS_LOAD_DEBUG);
1357 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "IA32E_MODE_GUEST", VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
1358 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENTRY_TO_SMM", VMX_ENTRY_CTLS_ENTRY_TO_SMM);
1359 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DEACTIVATE_DUAL_MON", VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
1360 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_ENTRY_CTLS_LOAD_PERF_MSR);
1361 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_ENTRY_CTLS_LOAD_PAT_MSR);
1362 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_ENTRY_CTLS_LOAD_EFER_MSR);
1363 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_BNDCFGS_MSR", VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR);
1364 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
1365 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_RTIT_CTL_MSR", VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR);
1366}
1367
1368
1369/**
1370 * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
1371 *
1372 * @param pVmxMsr Pointer to the VMX MSR.
1373 */
1374static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1375{
1376 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1377 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1378 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u));
1379 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_DEBUG", VMX_EXIT_CTLS_SAVE_DEBUG);
1380 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HOST_ADDR_SPACE_SIZE", VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1381 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_EXIT_CTLS_LOAD_PERF_MSR);
1382 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ACK_EXT_INT", VMX_EXIT_CTLS_ACK_EXT_INT);
1383 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PAT_MSR", VMX_EXIT_CTLS_SAVE_PAT_MSR);
1384 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_EXIT_CTLS_LOAD_PAT_MSR);
1385 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_EFER_MSR", VMX_EXIT_CTLS_SAVE_EFER_MSR);
1386 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_EXIT_CTLS_LOAD_EFER_MSR);
1387 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PREEMPT_TIMER", VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1388 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_BNDCFGS_MSR", VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR);
1389 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT);
1390 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_RTIT_CTL_MSR", VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR);
1391}
1392
1393
1394/**
1395 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
1396 *
1397 * @param fCaps The VMX EPT/VPID capability MSR value.
1398 */
1399static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
1400{
1401 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
1402 HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
1403 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
1404 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_5", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_5);
1405 HMVMX_REPORT_MSR_CAP(fCaps, "EMT_UC", MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
1406 HMVMX_REPORT_MSR_CAP(fCaps, "EMT_WB", MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
1407 HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
1408 HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
1409 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
1410 HMVMX_REPORT_MSR_CAP(fCaps, "EPT_ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY);
1411 HMVMX_REPORT_MSR_CAP(fCaps, "ADVEXITINFO_EPT", MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT);
1412 HMVMX_REPORT_MSR_CAP(fCaps, "SSS", MSR_IA32_VMX_EPT_VPID_CAP_SSS);
1413 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
1414 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1415 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
1416 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1417 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
1418 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
1419 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
1420}
1421
1422
1423/**
1424 * Reports MSR_IA32_VMX_MISC MSR to the log.
1425 *
1426 * @param pVM Pointer to the VM.
1427 * @param fMisc The VMX misc. MSR value.
1428 */
1429static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
1430{
1431 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc));
1432 uint8_t const cPreemptTimerShift = RT_BF_GET(fMisc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
1433 if (cPreemptTimerShift == pVM->hm.s.vmx.cPreemptTimerShift)
1434 LogRel(("HM: PREEMPT_TIMER_TSC = %#x\n", cPreemptTimerShift));
1435 else
1436 {
1437 LogRel(("HM: PREEMPT_TIMER_TSC = %#x - erratum detected, using %#x instead\n", cPreemptTimerShift,
1438 pVM->hm.s.vmx.cPreemptTimerShift));
1439 }
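    /* Note: the VMX-preemption timer counts down once every 2^PREEMPT_TIMER_TSC TSC
       ticks. A mismatch with cPreemptTimerShift means the erratum handling done
       during ring-0 init overrode the rate the MSR advertises. */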
1440 LogRel(("HM: EXIT_SAVE_EFER_LMA = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_EXIT_SAVE_EFER_LMA)));
1441 LogRel(("HM: ACTIVITY_STATES = %#x%s\n", RT_BF_GET(fMisc, VMX_BF_MISC_ACTIVITY_STATES),
1442 hmR3VmxGetActivityStateAllDesc(fMisc)));
1443 LogRel(("HM: INTEL_PT = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_INTEL_PT)));
1444 LogRel(("HM: SMM_READ_SMBASE_MSR = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_SMM_READ_SMBASE_MSR)));
1445 LogRel(("HM: CR3_TARGET = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_CR3_TARGET)));
1446 LogRel(("HM: MAX_MSR = %#x ( %u )\n", RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS),
1447 VMX_MISC_MAX_MSRS(fMisc)));
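    /* The raw MAX_MSR field is an encoding: the Intel SDM defines the recommended
       limit for the VM-entry/VM-exit MSR-load/store lists as 512 * (N + 1) entries,
       which is the decoded count logged above via VMX_MISC_MAX_MSRS. */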
1448 LogRel(("HM: VMXOFF_BLOCK_SMI = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMXOFF_BLOCK_SMI)));
1449 LogRel(("HM: VMWRITE_ALL = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMWRITE_ALL)));
1450 LogRel(("HM: ENTRY_INJECT_SOFT_INT = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_ENTRY_INJECT_SOFT_INT)));
1451 LogRel(("HM: MSEG_ID = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_MSEG_ID)));
1452}
1453
1454
1455/**
1456 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
1457 *
1458 * @param uVmcsEnum The VMX VMCS enum MSR value.
1459 */
1460static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
1461{
1462 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", uVmcsEnum));
1463 LogRel(("HM: HIGHEST_IDX = %#x\n", RT_BF_GET(uVmcsEnum, VMX_BF_VMCS_ENUM_HIGHEST_IDX)));
1464}
1465
1466
1467/**
1468 * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
1469 *
1470 * @param uVmFunc The VMX VMFUNC MSR value.
1471 */
1472static void hmR3VmxReportVmFuncMsr(uint64_t uVmFunc)
1473{
1474 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc));
1475 HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", RT_BF_GET(uVmFunc, VMX_BF_VMFUNC_EPTP_SWITCHING));
1476}
1477
1478
1479/**
1480 * Reports VMX CR0, CR4 fixed MSRs.
1481 *
1482 * @param pMsrs Pointer to the VMX MSRs.
1483 */
1484static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
1485{
1486 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0));
1487 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1));
1488 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0));
1489 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1));
1490}
1491
1492
1493/**
1494 * Finish VT-x initialization (after ring-0 init).
1495 *
1496 * @returns VBox status code.
1497 * @param pVM The cross context VM structure.
1498 */
1499static int hmR3InitFinalizeR0Intel(PVM pVM)
1500{
1501 int rc;
1502
1503 LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
1504 AssertLogRelReturn(pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl != 0, VERR_HM_IPE_4);
1505
1506 LogRel(("HM: Using VT-x implementation 3.0\n"));
1507 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1508 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
1509 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEfer));
1510 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
1511
1512 hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl);
1513 hmR3VmxReportBasicMsr(pVM->hm.s.vmx.MsrsForRing3.u64Basic);
1514
1515 hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.PinCtls);
1516 hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.ProcCtls);
1517 if (pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1518 hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.MsrsForRing3.ProcCtls2);
1519
1520 hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.EntryCtls);
1521 hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.ExitCtls);
1522
1523 if (RT_BF_GET(pVM->hm.s.vmx.MsrsForRing3.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1524 {
1525 /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
1526 LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TruePinCtls));
1527 LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueProcCtls));
1528 LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueEntryCtls));
1529 LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueExitCtls));
1530 }
1531
1532 hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.MsrsForRing3.u64Misc);
1533 hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.MsrsForRing3.u64VmcsEnum);
1534 if (pVM->hm.s.vmx.MsrsForRing3.u64EptVpidCaps)
1535 hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.MsrsForRing3.u64EptVpidCaps);
1536 if (pVM->hm.s.vmx.MsrsForRing3.u64VmFunc)
1537 hmR3VmxReportVmFuncMsr(pVM->hm.s.vmx.MsrsForRing3.u64VmFunc);
1538 hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.MsrsForRing3);
1539
1540#ifdef TODO_9217_VMCSINFO
1541 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1542 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1543 {
1544 PCVMXVMCSINFOSHARED pVmcsInfo = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfo;
1545 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
1546 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysVmcs));
1547 }
1548#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1549 if (pVM->cpum.ro.GuestFeatures.fVmx)
1550 {
1551 LogRel(("HM: Nested-guest:\n"));
1552 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1553 {
1554 PCVMXVMCSINFOSHARED pVmcsInfoNstGst = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfoNstGst;
1555 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysMsrBitmap));
1556 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysVmcs));
1557 }
1558 }
1559#endif
1560#endif /* TODO_9217_VMCSINFO */
1561
1562 /*
1563 * EPT and unrestricted guest execution are determined in HMR3Init, verify the sanity of that.
1564 */
1565 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1566 || (pVM->hm.s.vmx.MsrsForRing3.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
1567 VERR_HM_IPE_1);
1568 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuestCfg
1569 || ( (pVM->hm.s.vmx.MsrsForRing3.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
1570 && pVM->hm.s.fNestedPagingCfg),
1571 VERR_HM_IPE_1);
1572
1573 /*
1574 * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1575 * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
1576 * with Nehalem and secondary VM-execution controls should be supported on all of them, but nonetheless it's Intel...
1577 */
1578 if ( !(pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1579 && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1580 {
1581 CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1582 LogRel(("HM: Disabled RDTSCP\n"));
1583 }
1584
1585 if (!pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1586 {
1587 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
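        /* Layout of this allocation, as used by the code below: the TSS (with its
           interrupt redirection bitmap) and the two-page I/O bitmap occupy the first
           three pages; the identity-mapped page directory for the non-paged modes
           goes into the fourth (see pNonPagingModeEPTPageTable further down). */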
1588 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, hmR3VmmDevHeapNotify, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1589 if (RT_SUCCESS(rc))
1590 {
1591 /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
1592 Refer Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
1593 esp. Figure 20-5.*/
1594 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1595 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1596
1597 /* A bit set to 0 means the software interrupt is redirected to the
1598 8086 program interrupt handler rather than switching to the
1599 protected-mode handler. */
1600 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1601
1602 /* Allow all port IO, so that port IO instructions do not cause
1603 exceptions but instead cause a VM-exit (based on VT-x's
1604 IO bitmap which we currently configure to always cause an exit). */
1605 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
1606 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
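            /* The 0xff written above terminates the I/O permission bitmap; the CPU
               requires an all-ones byte after the last bitmap byte in the TSS. */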
1607
1608 /*
1609 * Construct a 1024 element page directory with 4 MB pages for the identity mapped
1610 * page table used in real and protected mode without paging with EPT.
1611 */
1612 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1613 for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
1614 {
1615 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1616 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1617 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1618 | X86_PDE4M_G;
1619 }
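            /* Each PDE above identity-maps a 4 MB region (virt == phys) and is marked
               present, writable, user, accessed, dirty and global up front, so these
               entries never need fixing up while the guest runs in such modes. */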
1620
1621 /* We convert it here every time as PCI regions could be reconfigured. */
1622 if (PDMVmmDevHeapIsEnabled(pVM))
1623 {
1624 RTGCPHYS GCPhys;
1625 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1626 AssertRCReturn(rc, rc);
1627 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
1628
1629 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1630 AssertRCReturn(rc, rc);
1631 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
1632 }
1633 }
1634 else
1635 {
1636 LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1637 pVM->hm.s.vmx.pRealModeTSS = NULL;
1638 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1639 return VMSetError(pVM, rc, RT_SRC_POS,
1640 "HM failure: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)", rc);
1641 }
1642 }
1643
1644 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1645 : "HM: Guest support: 32-bit only\n"));
1646
1647 /*
1648 * Call ring-0 to set up the VM.
1649 */
1650 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
1651 if (rc != VINF_SUCCESS)
1652 {
1653 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1654 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1655 {
1656 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1657 LogRel(("HM: CPU[%u] Last instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
1658 LogRel(("HM: CPU[%u] HM error %#x (%u)\n", idCpu, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
1659 }
1660 HMR3CheckError(pVM, rc);
1661 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1662 }
1663
1664 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
1665 LogRel(("HM: Enabled VMX\n"));
1666 pVM->hm.s.vmx.fEnabled = true;
1667
1668 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1669
1670 /*
1671 * Change the CPU features.
1672 */
1673 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1674 if (pVM->hm.s.fAllow64BitGuestsCfg)
1675 {
1676 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1677 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1678 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1679 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1680 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1681 }
1682 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE
1683 (we reuse the host EFER in the switcher). */
1684 /** @todo this needs to be fixed properly!! */
1685 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1686 {
1687 if (pVM->hm.s.vmx.u64HostMsrEfer & MSR_K6_EFER_NXE)
1688 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1689 else
1690 LogRel(("HM: NX not enabled on the host, unavailable to PAE guest\n"));
1691 }
1692
1693 /*
1694 * Log configuration details.
1695 */
1696 if (pVM->hm.s.fNestedPagingCfg)
1697 {
1698 LogRel(("HM: Enabled nested paging\n"));
1699 if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
1700 LogRel(("HM: EPT flush type = Single context\n"));
1701 else if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1702 LogRel(("HM: EPT flush type = All contexts\n"));
1703 else if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_NOT_SUPPORTED)
1704 LogRel(("HM: EPT flush type = Not supported\n"));
1705 else
1706 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushEptForRing3));
1707
1708 if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1709 LogRel(("HM: Enabled unrestricted guest execution\n"));
1710
1711 if (pVM->hm.s.fLargePages)
1712 {
1713 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1714 PGMSetLargePageUsage(pVM, true);
1715 LogRel(("HM: Enabled large page support\n"));
1716 }
1717 }
1718 else
1719 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
1720
1721 if (pVM->hm.s.vmx.fVpidForRing3)
1722 {
1723 LogRel(("HM: Enabled VPID\n"));
1724 if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_INDIV_ADDR)
1725 LogRel(("HM: VPID flush type = Individual addresses\n"));
1726 else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
1727 LogRel(("HM: VPID flush type = Single context\n"));
1728 else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1729 LogRel(("HM: VPID flush type = All contexts\n"));
1730 else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1731 LogRel(("HM: VPID flush type = Single context retain globals\n"));
1732 else
1733 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpidForRing3));
1734 }
1735 else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_NOT_SUPPORTED)
1736 LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
1737
1738 if (pVM->hm.s.vmx.fUsePreemptTimerCfg)
1739 LogRel(("HM: Enabled VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
1740 else
1741 LogRel(("HM: Disabled VMX-preemption timer\n"));
1742
1743 if (pVM->hm.s.fVirtApicRegs)
1744 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1745
1746 if (pVM->hm.s.fPostedIntrs)
1747 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1748
1749 if (pVM->hm.s.vmx.fUseVmcsShadowing)
1750 {
1751 bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.vmx.MsrsForRing3.u64Misc & VMX_MISC_VMWRITE_ALL);
1752 LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
1753 }
1754
1755 return VINF_SUCCESS;
1756}
1757
1758
1759/**
1760 * Finish AMD-V initialization (after ring-0 init).
1761 *
1762 * @returns VBox status code.
1763 * @param pVM The cross context VM structure.
1764 */
1765static int hmR3InitFinalizeR0Amd(PVM pVM)
1766{
1767 LogFunc(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1768
1769 LogRel(("HM: Using AMD-V implementation 2.0\n"));
1770
1771 uint32_t u32Family;
1772 uint32_t u32Model;
1773 uint32_t u32Stepping;
1774 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
1775 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
1776 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1777 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
1778 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.svm.u32Rev));
1779 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.uMaxAsidForLog));
1780 LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.svm.fFeaturesForRing3));
1781
1782 /*
1783 * Enumerate AMD-V features.
1784 */
1785 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1786 {
1787#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
1788 HMSVM_REPORT_FEATURE("NESTED_PAGING", X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1789 HMSVM_REPORT_FEATURE("LBR_VIRT", X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1790 HMSVM_REPORT_FEATURE("SVM_LOCK", X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1791 HMSVM_REPORT_FEATURE("NRIP_SAVE", X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1792 HMSVM_REPORT_FEATURE("TSC_RATE_MSR", X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1793 HMSVM_REPORT_FEATURE("VMCB_CLEAN", X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1794 HMSVM_REPORT_FEATURE("FLUSH_BY_ASID", X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1795 HMSVM_REPORT_FEATURE("DECODE_ASSISTS", X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS),
1796 HMSVM_REPORT_FEATURE("PAUSE_FILTER", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1797 HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
1798 HMSVM_REPORT_FEATURE("AVIC", X86_CPUID_SVM_FEATURE_EDX_AVIC),
1799 HMSVM_REPORT_FEATURE("VIRT_VMSAVE_VMLOAD", X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD),
1800 HMSVM_REPORT_FEATURE("VGIF", X86_CPUID_SVM_FEATURE_EDX_VGIF),
1801 HMSVM_REPORT_FEATURE("GMET", X86_CPUID_SVM_FEATURE_EDX_GMET),
1802#undef HMSVM_REPORT_FEATURE
1803 };
1804
1805 uint32_t fSvmFeatures = pVM->hm.s.svm.fFeaturesForRing3;
1806 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1807 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1808 {
1809 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1810 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1811 }
1812 if (fSvmFeatures)
1813 for (unsigned iBit = 0; iBit < 32; iBit++)
1814 if (RT_BIT_32(iBit) & fSvmFeatures)
1815 LogRel(("HM: Reserved bit %u\n", iBit));
1816
1817 /*
1818 * Nested paging is determined in HMR3Init, verify the sanity of that.
1819 */
1820 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1821 || (pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1822 VERR_HM_IPE_1);
1823
1824#if 0
1825 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
1826 * here. */
1827 if (RTR0IsPostIpiSupport())
1828 pVM->hm.s.fPostedIntrs = true;
1829#endif
1830
1831 /*
1832 * Determine whether we need to intercept #UD in SVM mode for emulating
1833 * Intel SYSENTER/SYSEXIT on AMD64, as these instructions result in #UD
1834 * when executed in long mode. This is only really applicable when
1835 * non-default CPU profiles are in effect, i.e. guest vendor differs
1836 * from the host one.
1837 */
1838 if (CPUMGetGuestCpuVendor(pVM) != CPUMGetHostCpuVendor(pVM))
1839 switch (CPUMGetGuestCpuVendor(pVM))
1840 {
1841 case CPUMCPUVENDOR_INTEL:
1842 case CPUMCPUVENDOR_VIA: /*?*/
1843 case CPUMCPUVENDOR_SHANGHAI: /*?*/
1844 switch (CPUMGetHostCpuVendor(pVM))
1845 {
1846 case CPUMCPUVENDOR_AMD:
1847 case CPUMCPUVENDOR_HYGON:
1848 if (pVM->hm.s.fAllow64BitGuestsCfg)
1849 {
1850 LogRel(("HM: Intercepting #UD for emulating SYSENTER/SYSEXIT in long mode.\n"));
1851 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1852 pVM->apCpusR3[idCpu]->hm.s.svm.fEmulateLongModeSysEnterExit = true;
1853 }
1854 break;
1855 default: break;
1856 }
1857 default: break;
1858 }
1859
1860 /*
1861 * Call ring-0 to set up the VM.
1862 */
1863 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1864 if (rc != VINF_SUCCESS)
1865 {
1866 AssertMsgFailed(("%Rrc\n", rc));
1867 LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
1868 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1869 }
1870
1871 LogRel(("HM: Enabled SVM\n"));
1872 pVM->hm.s.svm.fEnabled = true;
1873
1874 if (pVM->hm.s.fNestedPagingCfg)
1875 {
1876 LogRel(("HM: Enabled nested paging\n"));
1877
1878 /*
1879 * Enable large pages (2 MB) if applicable.
1880 */
1881 if (pVM->hm.s.fLargePages)
1882 {
1883 PGMSetLargePageUsage(pVM, true);
1884 LogRel(("HM: Enabled large page support\n"));
1885 }
1886 }
1887
1888 if (pVM->hm.s.fVirtApicRegs)
1889 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1890
1891 if (pVM->hm.s.fPostedIntrs)
1892 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1893
1894 hmR3DisableRawMode(pVM);
1895
1896 /*
1897 * Change the CPU features.
1898 */
1899 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1900 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1901 if (pVM->hm.s.fAllow64BitGuestsCfg)
1902 {
1903 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1904 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1905 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1906 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1907 }
1908 /* Turn on NXE if PAE has been enabled. */
1909 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1910 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1911
1912 LogRel((pVM->hm.s.fTprPatchingAllowed ? "HM: Enabled TPR patching\n"
1913 : "HM: Disabled TPR patching\n"));
1914
1915 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1916 : "HM: Guest support: 32-bit only\n"));
1917 return VINF_SUCCESS;
1918}
1919
1920
1921/**
1922 * Applies relocations to data and code managed by this
1923 * component. This function will be called at init and
1924 * whenever the VMM needs to relocate itself inside the GC.
1925 *
1926 * @param pVM The cross context VM structure.
1927 */
1928VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1929{
1930 Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1931
1932 /* When called during state loading, fetch the current paging mode. */
1933 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1934 {
1935 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1936 {
1937 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1938 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1939 }
1940 }
1941}
1942
1943
1944/**
1945 * Terminates the HM.
1946 *
1947 * Termination means cleaning up and freeing all resources;
1948 * the VM itself is, at this point, powered off or suspended.
1949 *
1950 * @returns VBox status code.
1951 * @param pVM The cross context VM structure.
1952 */
1953VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
1954{
1955 if (pVM->hm.s.vmx.pRealModeTSS)
1956 {
1957 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1958 pVM->hm.s.vmx.pRealModeTSS = NULL;
1959 }
1960 hmR3TermCPU(pVM);
1961 return VINF_SUCCESS;
1962}
1963
1964
1965/**
1966 * Terminates the per-VCPU HM.
1967 *
1968 * @returns VBox status code.
1969 * @param pVM The cross context VM structure.
1970 */
1971static int hmR3TermCPU(PVM pVM)
1972{
1973#ifdef VBOX_WITH_STATISTICS
1974 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1975 {
1976 PVMCPU pVCpu = pVM->apCpusR3[idCpu]; NOREF(pVCpu);
1977 if (pVCpu->hm.s.paStatExitReason)
1978 {
1979 MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
1980 pVCpu->hm.s.paStatExitReason = NULL;
1981 pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR;
1982 }
1983 if (pVCpu->hm.s.paStatInjectedIrqs)
1984 {
1985 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
1986 pVCpu->hm.s.paStatInjectedIrqs = NULL;
1987 pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
1988 }
1989 if (pVCpu->hm.s.paStatInjectedXcpts)
1990 {
1991 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedXcpts);
1992 pVCpu->hm.s.paStatInjectedXcpts = NULL;
1993 pVCpu->hm.s.paStatInjectedXcptsR0 = NIL_RTR0PTR;
1994 }
1995# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1996 if (pVCpu->hm.s.paStatNestedExitReason)
1997 {
1998 MMHyperFree(pVM, pVCpu->hm.s.paStatNestedExitReason);
1999 pVCpu->hm.s.paStatNestedExitReason = NULL;
2000 pVCpu->hm.s.paStatNestedExitReasonR0 = NIL_RTR0PTR;
2001 }
2002# endif
2003 }
2004#else
2005 RT_NOREF(pVM);
2006#endif
2007 return VINF_SUCCESS;
2008}
2009
2010
2011/**
2012 * Resets a virtual CPU.
2013 *
2014 * Used by HMR3Reset and CPU hot plugging.
2015 *
2016 * @param pVCpu The cross context virtual CPU structure to reset.
2017 */
2018VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
2019{
2020 /* Sync the entire state on VM reset ring-0 re-entry. It's safe to reset
2021 the HM flags here; all other EMTs are in ring-3. See VMR3Reset(). */
2022 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
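    /* Marking everything dirty this way forces a full guest-state export to the
       VMCS/VMCB the next time this VCPU enters ring-0 after the reset. */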
2023
2024 pVCpu->hm.s.fActive = false;
2025 pVCpu->hm.s.Event.fPending = false;
2026 pVCpu->hm.s.vmx.u64GstMsrApicBase = 0;
2027 pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true;
2028#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2029 if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
2030 pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true;
2031#endif
2032}
2033
2034
2035/**
2036 * The VM is being reset.
2037 *
2038 * For the HM component this means that any GDT/LDT/TSS monitors
2039 * need to be removed.
2040 *
2041 * @param pVM The cross context VM structure.
2042 */
2043VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
2044{
2045 LogFlow(("HMR3Reset:\n"));
2046
2047 if (HMIsEnabled(pVM))
2048 hmR3DisableRawMode(pVM);
2049
2050 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2051 HMR3ResetCpu(pVM->apCpusR3[idCpu]);
2052
2053 /* Clear all patch information. */
2054 pVM->hm.s.pGuestPatchMem = 0;
2055 pVM->hm.s.pFreeGuestPatchMem = 0;
2056 pVM->hm.s.cbGuestPatchMem = 0;
2057 pVM->hm.s.cPatches = 0;
2058 pVM->hm.s.PatchTree = 0;
2059 pVM->hm.s.fTPRPatchingActive = false;
2060 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
2061}
2062
2063
2064/**
2065 * Callback to remove all TPR patches, restoring the original instructions.
2066 *
2067 * @returns VBox strict status code.
2068 * @param pVM The cross context VM structure.
2069 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2070 * @param pvUser The VCPU ID (as uintptr_t) of the EMT that issued the patch request.
2071 */
2072static DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
2073{
2074 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2075
2076 /* Only execute the handler on the VCPU on which the original patch request was issued. */
2077 if (pVCpu->idCpu != idCpu)
2078 return VINF_SUCCESS;
2079
2080 Log(("hmR3RemovePatches\n"));
2081 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2082 {
2083 uint8_t abInstr[15];
2084 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2085 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
2086 int rc;
2087
2088#ifdef LOG_ENABLED
2089 char szOutput[256];
2090 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2091 szOutput, sizeof(szOutput), NULL);
2092 if (RT_SUCCESS(rc))
2093 Log(("Patched instr: %s\n", szOutput));
2094#endif
2095
2096 /* Check if the instruction is still the same. */
2097 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
2098 if (rc != VINF_SUCCESS)
2099 {
2100 Log(("Patched code removed? (rc=%Rrc0\n", rc));
2101 continue; /* swapped out or otherwise removed; skip it. */
2102 }
2103
2104 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
2105 {
2106 Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
2107 continue; /* skip it. */
2108 }
2109
2110 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
2111 AssertRC(rc);
2112
2113#ifdef LOG_ENABLED
2114 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2115 szOutput, sizeof(szOutput), NULL);
2116 if (RT_SUCCESS(rc))
2117 Log(("Original instr: %s\n", szOutput));
2118#endif
2119 }
2120 pVM->hm.s.cPatches = 0;
2121 pVM->hm.s.PatchTree = 0;
2122 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
2123 pVM->hm.s.fTPRPatchingActive = false;
2124 return VINF_SUCCESS;
2125}
2126
2127
2128/**
2129 * Worker for enabling patching in a VT-x/AMD-V guest.
2130 *
2131 * @returns VBox status code.
2132 * @param pVM The cross context VM structure.
2133 * @param idCpu VCPU to execute hmR3RemovePatches on.
2134 * @param pPatchMem Patch memory range.
2135 * @param cbPatchMem Size of the memory range.
2136 */
2137static DECLCALLBACK(int) hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
2138{
2139 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
2140 AssertRC(rc);
2141
2142 pVM->hm.s.pGuestPatchMem = pPatchMem;
2143 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
2144 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
2145 return VINF_SUCCESS;
2146}
2147
2148
2149/**
2150 * Enable patching in a VT-x/AMD-V guest.
2151 *
2152 * @returns VBox status code.
2153 * @param pVM The cross context VM structure.
2154 * @param pPatchMem Patch memory range.
2155 * @param cbPatchMem Size of the memory range.
2156 */
2157VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2158{
2159 VM_ASSERT_EMT(pVM);
2160 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2161 if (pVM->cCpus > 1)
2162 {
2163 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
2164 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
2165 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2166 AssertRC(rc);
2167 return rc;
2168 }
2169 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2170}
2171
2172
2173/**
2174 * Disable patching in a VT-x/AMD-V guest.
2175 *
2176 * @returns VBox status code.
2177 * @param pVM The cross context VM structure.
2178 * @param pPatchMem Patch memory range.
2179 * @param cbPatchMem Size of the memory range.
2180 */
2181VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2182{
2183 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2184 RT_NOREF2(pPatchMem, cbPatchMem);
2185
2186 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
2187 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
2188
2189 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
2190 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
2191 (void *)(uintptr_t)VMMGetCpuId(pVM));
2192 AssertRC(rc);
2193
2194 pVM->hm.s.pGuestPatchMem = 0;
2195 pVM->hm.s.pFreeGuestPatchMem = 0;
2196 pVM->hm.s.cbGuestPatchMem = 0;
2197 pVM->hm.s.fTPRPatchingActive = false;
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Callback to patch a TPR instruction (vmmcall or mov cr8).
2204 *
2205 * @returns VBox strict status code.
2206 * @param pVM The cross context VM structure.
2207 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2208 * @param pvUser The VCPU ID (as uintptr_t) of the EMT that issued the patch request.
2209 *
2210 */
2211static DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2212{
2213 /*
2214 * Only execute the handler on the VCPU on which the original patch request
2215 * was issued. (The other CPU(s) might not yet have switched to protected
2216 * mode, nor have the correct memory context.)
2217 */
2218 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2219 if (pVCpu->idCpu != idCpu)
2220 return VINF_SUCCESS;
2221
2222 /*
2223 * We're racing other VCPUs here, so don't try to patch the instruction twice
2224 * and make sure there is still room for our patch record.
2225 */
2226 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2227 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2228 if (pPatch)
2229 {
2230 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
2231 return VINF_SUCCESS;
2232 }
2233 uint32_t const idx = pVM->hm.s.cPatches;
2234 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2235 {
2236 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2237 return VINF_SUCCESS;
2238 }
2239 pPatch = &pVM->hm.s.aPatches[idx];
2240
2241 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2242
2243 /*
2244 * Disassemble the instruction and get cracking.
2245 */
2246 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
2247 DISCPUSTATE Dis;
2248 uint32_t cbOp;
2249 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2250 AssertRC(rc);
2251 if ( rc == VINF_SUCCESS
2252 && Dis.pCurInstr->uOpcode == OP_MOV
2253 && cbOp >= 3)
2254 {
2255 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
2256
2257 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2258 AssertRC(rc);
2259
2260 pPatch->cbOp = cbOp;
2261
2262 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2263 {
2264 /* write. */
2265 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2266 {
2267 pPatch->enmType = HMTPRINSTR_WRITE_REG;
2268 pPatch->uSrcOperand = Dis.Param2.Base.idxGenReg;
2269 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", Dis.Param2.Base.idxGenReg));
2270 }
2271 else
2272 {
2273 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2274 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
2275 pPatch->uSrcOperand = Dis.Param2.uValue;
2276 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", Dis.Param2.uValue));
2277 }
2278 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2279 AssertRC(rc);
2280
2281 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2282 pPatch->cbNewOp = sizeof(s_abVMMCall);
2283 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2284 }
2285 else
2286 {
2287 /*
2288 * TPR Read.
2289 *
2290 * Found:
2291 * mov eax, dword [fffe0080] (5 bytes)
2292 * Check if next instruction is:
2293 * shr eax, 4
2294 */
2295 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2296
2297 uint8_t const idxMmioReg = Dis.Param1.Base.idxGenReg;
2298 uint8_t const cbOpMmio = cbOp;
2299 uint64_t const uSavedRip = pCtx->rip;
2300
2301 pCtx->rip += cbOp;
2302 rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2303 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
2304 pCtx->rip = uSavedRip;
2305
2306 if ( rc == VINF_SUCCESS
2307 && Dis.pCurInstr->uOpcode == OP_SHR
2308 && Dis.Param1.fUse == DISUSE_REG_GEN32
2309 && Dis.Param1.Base.idxGenReg == idxMmioReg
2310 && Dis.Param2.fUse == DISUSE_IMMEDIATE8
2311 && Dis.Param2.uValue == 4
2312 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
2313 {
2314 uint8_t abInstr[15];
2315
2316 /* Replacing the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction so as to
2317 access CR8 in 32-bit mode and not cause a #VMEXIT. */
2318 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
2319 AssertRC(rc);
2320
2321 pPatch->cbOp = cbOpMmio + cbOp;
2322
2323 /* 0xf0, 0x0f, 0x20, 0xc0 = mov eax, cr8 */
2324 abInstr[0] = 0xf0;
2325 abInstr[1] = 0x0f;
2326 abInstr[2] = 0x20;
2327 abInstr[3] = 0xc0 | Dis.Param1.Base.idxGenReg;
2328 for (unsigned i = 4; i < pPatch->cbOp; i++)
2329 abInstr[i] = 0x90; /* nop */
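                /* f0 0f 20 c0+reg encodes LOCK MOV r32, CR0; AMD CPUs redefine the
                   LOCK prefix on MOV CRn to address CR8, which is what gives 32-bit
                   code TPR access without a VM-exit here. */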
2330
2331 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
2332 AssertRC(rc);
2333
2334 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
2335 pPatch->cbNewOp = pPatch->cbOp;
2336 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessCr8);
2337
2338 Log(("Acceptable read/shr candidate!\n"));
2339 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2340 }
2341 else
2342 {
2343 pPatch->enmType = HMTPRINSTR_READ;
2344 pPatch->uDstOperand = idxMmioReg;
2345
2346 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2347 AssertRC(rc);
2348
2349 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2350 pPatch->cbNewOp = sizeof(s_abVMMCall);
2351 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2352 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2353 }
2354 }
2355
2356 pPatch->Core.Key = pCtx->eip;
2357 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2358 AssertRC(rc);
2359
2360 pVM->hm.s.cPatches++;
2361 return VINF_SUCCESS;
2362 }
2363
2364 /*
2365 * Save invalid patch, so we will not try again.
2366 */
2367 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2368 pPatch->Core.Key = pCtx->eip;
2369 pPatch->enmType = HMTPRINSTR_INVALID;
2370 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2371 AssertRC(rc);
2372 pVM->hm.s.cPatches++;
2373 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2374 return VINF_SUCCESS;
2375}
2376
2377
2378/**
2379 * Callback to patch a TPR instruction (jump to generated code).
2380 *
2381 * @returns VBox strict status code.
2382 * @param pVM The cross context VM structure.
2383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2384 * @param pvUser The VCPU ID (as uintptr_t) of the EMT that issued the patch request.
2385 *
2386 */
2387static DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2388{
2389 /*
2390 * Only execute the handler on the VCPU on which the original patch request
2391 * was issued. (The other CPU(s) might not yet have switched to protected
2392 * mode, nor have the correct memory context.)
2393 */
2394 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2395 if (pVCpu->idCpu != idCpu)
2396 return VINF_SUCCESS;
2397
2398 /*
2399 * We're racing other VCPUs here, so don't try to patch the instruction twice
2400 * and make sure there is still room for our patch record.
2401 */
2402 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2403 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2404 if (pPatch)
2405 {
2406 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2407 return VINF_SUCCESS;
2408 }
2409 uint32_t const idx = pVM->hm.s.cPatches;
2410 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2411 {
2412 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2413 return VINF_SUCCESS;
2414 }
2415 pPatch = &pVM->hm.s.aPatches[idx];
2416
2417 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2418 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
2419
2420 /*
2421 * Disassemble the instruction and get cracking.
2422 */
2423 DISCPUSTATE Dis;
2424 uint32_t cbOp;
2425 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2426 AssertRC(rc);
2427 if ( rc == VINF_SUCCESS
2428 && Dis.pCurInstr->uOpcode == OP_MOV
2429 && cbOp >= 5)
2430 {
2431 uint8_t aPatch[64];
2432 uint32_t off = 0;
2433
2434 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2435 AssertRC(rc);
2436
2437 pPatch->cbOp = cbOp;
2438 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2439
2440 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2441 {
2442 /*
2443 * TPR write:
2444 *
2445 * push ECX [51]
2446 * push EDX [52]
2447 * push EAX [50]
2448 * xor EDX,EDX [31 D2]
2449 * mov EAX,EAX [89 C0]
2450 * or
2451 * mov EAX,0000000CCh [B8 CC 00 00 00]
2452 * mov ECX,0C0000082h [B9 82 00 00 C0]
2453 * wrmsr [0F 30]
2454 * pop EAX [58]
2455 * pop EDX [5A]
2456 * pop ECX [59]
2457 * jmp return_address [E9 return_address]
2458 */
2459 bool fUsesEax = (Dis.Param2.fUse == DISUSE_REG_GEN32 && Dis.Param2.Base.idxGenReg == DISGREG_EAX);
2460
2461 aPatch[off++] = 0x51; /* push ecx */
2462 aPatch[off++] = 0x52; /* push edx */
2463 if (!fUsesEax)
2464 aPatch[off++] = 0x50; /* push eax */
2465 aPatch[off++] = 0x31; /* xor edx, edx */
2466 aPatch[off++] = 0xd2;
2467 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2468 {
2469 if (!fUsesEax)
2470 {
2471 aPatch[off++] = 0x89; /* mov eax, src_reg */
2472 aPatch[off++] = MAKE_MODRM(3, Dis.Param2.Base.idxGenReg, DISGREG_EAX);
2473 }
2474 }
2475 else
2476 {
2477 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2478 aPatch[off++] = 0xb8; /* mov eax, immediate */
2479 *(uint32_t *)&aPatch[off] = Dis.Param2.uValue;
2480 off += sizeof(uint32_t);
2481 }
2482 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2483 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2484 off += sizeof(uint32_t);
2485
2486 aPatch[off++] = 0x0f; /* wrmsr */
2487 aPatch[off++] = 0x30;
2488 if (!fUsesEax)
2489 aPatch[off++] = 0x58; /* pop eax */
2490 aPatch[off++] = 0x5a; /* pop edx */
2491 aPatch[off++] = 0x59; /* pop ecx */
2492 }
2493 else
2494 {
2495 /*
2496 * TPR read:
2497 *
2498 * push ECX [51]
2499 * push EDX [52]
2500 * push EAX [50]
2501 * mov ECX,0C0000082h [B9 82 00 00 C0]
2502 * rdmsr [0F 32]
2503 * mov EAX,EAX [89 C0]
2504 * pop EAX [58]
2505 * pop EDX [5A]
2506 * pop ECX [59]
2507 * jmp return_address [E9 return_address]
2508 */
2509 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2510
2511 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2512 aPatch[off++] = 0x51; /* push ecx */
2513 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX)
2514 aPatch[off++] = 0x52; /* push edx */
2515 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2516 aPatch[off++] = 0x50; /* push eax */
2517
2518 aPatch[off++] = 0x31; /* xor edx, edx */
2519 aPatch[off++] = 0xd2;
2520
2521 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2522 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2523 off += sizeof(uint32_t);
2524
2525 aPatch[off++] = 0x0f; /* rdmsr */
2526 aPatch[off++] = 0x32;
2527
2528 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2529 {
2530 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2531 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, Dis.Param1.Base.idxGenReg);
2532 }
2533
2534 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2535 aPatch[off++] = 0x58; /* pop eax */
2536 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX)
2537 aPatch[off++] = 0x5a; /* pop edx */
2538 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2539 aPatch[off++] = 0x59; /* pop ecx */
2540 }
2541 aPatch[off++] = 0xe9; /* jmp return_address */
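        /* E9 takes a rel32 displacement measured from the end of the 5-byte jump,
           hence the "+ off + 4" below (4 displacement bytes follow the opcode). */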
2542 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2543 off += sizeof(RTRCUINTPTR);
2544
2545 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2546 {
2547 /* Write new code to the patch buffer. */
2548 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2549 AssertRC(rc);
2550
2551#ifdef LOG_ENABLED
2552 uint32_t cbCurInstr;
2553 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2554 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2555 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2556 {
2557 char szOutput[256];
2558 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2559 szOutput, sizeof(szOutput), &cbCurInstr);
2560 if (RT_SUCCESS(rc))
2561 Log(("Patch instr %s\n", szOutput));
2562 else
2563 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2564 }
2565#endif
2566
2567 pPatch->aNewOpcode[0] = 0xE9;
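            /* Same rel32 rule for the jump planted over the original instruction:
               the displacement is relative to eip + 5, the end of the E9 jump. */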
2568 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2569
2570 /* Overwrite the TPR instruction with a jump. */
2571 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2572 AssertRC(rc);
2573
2574 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
2575
2576 pVM->hm.s.pFreeGuestPatchMem += off;
2577 pPatch->cbNewOp = 5;
2578
2579 pPatch->Core.Key = pCtx->eip;
2580 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2581 AssertRC(rc);
2582
2583 pVM->hm.s.cPatches++;
2584 pVM->hm.s.fTPRPatchingActive = true;
2585 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2586 return VINF_SUCCESS;
2587 }
2588
2589 Log(("Ran out of space in our patch buffer!\n"));
2590 }
2591 else
2592 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2593
2594
2595 /*
2596 * Save invalid patch, so we will not try again.
2597 */
2598 pPatch = &pVM->hm.s.aPatches[idx];
2599 pPatch->Core.Key = pCtx->eip;
2600 pPatch->enmType = HMTPRINSTR_INVALID;
2601 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2602 AssertRC(rc);
2603 pVM->hm.s.cPatches++;
2604 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2605 return VINF_SUCCESS;
2606}
2607
2608
2609/**
2610 * Attempt to patch TPR MMIO instructions.
2611 *
2612 * @returns VBox status code.
2613 * @param pVM The cross context VM structure.
2614 * @param pVCpu The cross context virtual CPU structure.
2615 */
2616VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
2617{
2618 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2619 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2620 (void *)(uintptr_t)pVCpu->idCpu);
2621 AssertRC(rc);
2622 return rc;
2623}
2624
2625
2626/**
2627 * Checks if we need to reschedule due to VMM device heap changes.
2628 *
2629 * @returns true if a reschedule is required, otherwise false.
2630 * @param pVM The cross context VM structure.
2631 * @param pCtx VM execution context.
2632 */
2633VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx)
2634{
2635 /*
2636 * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
2637 * when the unrestricted guest execution feature is missing (VT-x only).
2638 */
2639 if ( pVM->hm.s.vmx.fEnabled
2640 && !pVM->hm.s.vmx.fUnrestrictedGuestCfg
2641 && CPUMIsGuestInRealModeEx(pCtx)
2642 && !PDMVmmDevHeapIsEnabled(pVM))
2643 return true;
2644
2645 return false;
2646}
2647
2648
2649/**
2650 * Notification callback from DBGF when interrupt breakpoints or generic debug
2651 * event settings change.
2652 *
2653 * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards; this
2654 * function only updates the VM globals.
2655 *
2656 * @param pVM The cross context VM structure.
2657 * @thread EMT(0)
2658 */
2659VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
2660{
2661 /* Interrupts. */
2662 bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
2663 || pVM->dbgf.ro.cHardIntBreakpoints > 0;
2664
2665 /* CPU Exceptions. */
2666 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
2667 !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
2668 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2669 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2670
2671 /* Common VM exits. */
2672 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
2673 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
2674 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2675 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2676
2677 /* Vendor specific VM exits. */
2678 if (HMR3IsVmxEnabled(pVM->pUVM))
2679 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
2680 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
2681 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2682 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2683 else
2684 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
2685 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
2686 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2687 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2688
2689 /* Done. */
2690 pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
2691}
2692
2693
2694/**
2695 * Follow up notification callback to HMR3NotifyDebugEventChanged for each CPU.
2696 *
2697 * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
2698 * per CPU settings.
2699 *
2700 * @param pVM The cross context VM structure.
2701 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2702 */
2703VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
2704{
2705 pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
2706}
2707
2708
2709/**
2710 * Checks if we are currently using hardware acceleration.
2711 *
2712 * @returns true if hardware acceleration is being used, otherwise false.
2713 * @param pVCpu The cross context virtual CPU structure.
2714 */
2715VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu)
2716{
2717 return pVCpu->hm.s.fActive;
2718}
2719
2720
2721/**
2722 * External interface for querying whether hardware acceleration is enabled.
2723 *
2724 * @returns true if VT-x or AMD-V is being used, otherwise false.
2725 * @param pUVM The user mode VM handle.
2726 * @sa HMIsEnabled, HMIsEnabledNotMacro.
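 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * @code
 * if (HMR3IsEnabled(pUVM))
 * LogRel(("Frontend: hardware acceleration is in use\n"));
 * @endcode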
2727 */
2728VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
2729{
2730 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2731 PVM pVM = pUVM->pVM;
2732 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2733 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
2734}
2735
2736
2737/**
2738 * External interface for querying whether VT-x is being used.
2739 *
2740 * @returns true if VT-x is being used, otherwise false.
2741 * @param pUVM The user mode VM handle.
2742 * @sa HMR3IsSvmEnabled, HMIsEnabled
2743 */
2744VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
2745{
2746 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2747 PVM pVM = pUVM->pVM;
2748 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2749 return pVM->hm.s.vmx.fEnabled
2750 && pVM->hm.s.vmx.fSupported
2751 && pVM->fHMEnabled;
2752}
2753
2754
2755/**
2756 * External interface for querying whether AMD-V is being used.
2757 *
2758 * @returns true if AMD-V is being used, otherwise false.
2759 * @param pUVM The user mode VM handle.
2760 * @sa HMR3IsVmxEnabled, HMIsEnabled
2761 */
2762VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
2763{
2764 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2765 PVM pVM = pUVM->pVM;
2766 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2767 return pVM->hm.s.svm.fEnabled
2768 && pVM->hm.s.svm.fSupported
2769 && pVM->fHMEnabled;
2770}
2771
2772
2773/**
2774 * Checks if we are currently using nested paging.
2775 *
2776 * @returns true if nested paging is being used, otherwise false.
2777 * @param pUVM The user mode VM handle.
2778 */
2779VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
2780{
2781 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2782 PVM pVM = pUVM->pVM;
2783 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2784 return pVM->hm.s.fNestedPagingCfg;
2785}
2786
2787
2788/**
2789 * Checks if virtualized APIC registers are enabled.
2790 *
2791 * When enabled this feature allows the hardware to access most of the
2792 * APIC registers in the virtual-APIC page without causing VM-exits. See
2793 * Intel spec. 29.1.1 "Virtualized APIC Registers".
2794 *
2795 * @returns true if virtualized APIC registers are enabled, otherwise
2796 * false.
2797 * @param pUVM The user mode VM handle.
2798 */
2799VMMR3DECL(bool) HMR3AreVirtApicRegsEnabled(PUVM pUVM)
2800{
2801 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2802 PVM pVM = pUVM->pVM;
2803 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2804 return pVM->hm.s.fVirtApicRegs;
2805}
2806
2807
2808/**
2809 * Checks if APIC posted-interrupt processing is enabled.
2810 *
2811 * This returns whether we can deliver interrupts to the guest without
2812 * leaving guest-context by updating APIC state from host-context.
2813 *
2814 * @returns true if APIC posted-interrupt processing is enabled,
2815 * otherwise false.
2816 * @param pUVM The user mode VM handle.
2817 */
2818VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM)
2819{
2820 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2821 PVM pVM = pUVM->pVM;
2822 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2823 return pVM->hm.s.fPostedIntrs;
2824}
2825
2826
2827/**
2828 * Checks if we are currently using VPID in VT-x mode.
2829 *
2830 * @returns true if VPID is being used, otherwise false.
2831 * @param pUVM The user mode VM handle.
2832 */
2833VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
2834{
2835 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2836 PVM pVM = pUVM->pVM;
2837 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2838 return pVM->hm.s.vmx.fVpidForRing3;
2839}
2840
2841
2842/**
2843 * Checks if we are currently using VT-x unrestricted execution,
2844 * aka UX.
2845 *
2846 * @returns true if UX is being used, otherwise false.
2847 * @param pUVM The user mode VM handle.
2848 */
2849VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
2850{
2851 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2852 PVM pVM = pUVM->pVM;
2853 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2854 return pVM->hm.s.vmx.fUnrestrictedGuestCfg
2855 || pVM->hm.s.svm.fSupported;
2856}
2857
2858
2859/**
2860 * Checks if the VMX-preemption timer is being used.
2861 *
2862 * @returns true if the VMX-preemption timer is being used, otherwise false.
2863 * @param pVM The cross context VM structure.
2864 */
2865VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2866{
2867 return HMIsEnabled(pVM)
2868 && pVM->hm.s.vmx.fEnabled
2869 && pVM->hm.s.vmx.fUsePreemptTimerCfg;
2870}
2871
2872
2873#ifdef TODO_9217_VMCSINFO
2874/**
2875 * Helper for HMR3CheckError to log VMCS controls to the release log.
2876 *
2877 * @param idCpu The Virtual CPU ID.
2878 * @param pVmcsInfo The VMCS info. object.
2879 */
2880static void hmR3CheckErrorLogVmcsCtls(VMCPUID idCpu, PCVMXVMCSINFO pVmcsInfo)
2881{
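 /* HMVMX_LOGREL_FEAT is assumed to append the feature name to the release
 log when the corresponding bit is set in u32Val. */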
2882 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", idCpu, pVmcsInfo->u32PinCtls));
2883 {
2884 uint32_t const u32Val = pVmcsInfo->u32PinCtls;
2885 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
2886 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
2887 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_VIRT_NMI );
2888 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_PREEMPT_TIMER);
2889 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
2890 }
2891 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls));
2892 {
2893 uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
2894 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
2895 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
2896 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_HLT_EXIT );
2897 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INVLPG_EXIT );
2898 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MWAIT_EXIT );
2899 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDPMC_EXIT );
2900 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDTSC_EXIT );
2901 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_LOAD_EXIT );
2902 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_STORE_EXIT );
2903 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_LOAD_EXIT );
2904 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_STORE_EXIT );
2905 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TPR_SHADOW );
2906 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_NMI_WINDOW_EXIT );
2907 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MOV_DR_EXIT );
2908 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_UNCOND_IO_EXIT );
2909 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_IO_BITMAPS );
2910 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_TRAP_FLAG );
2911 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_MSR_BITMAPS );
2912 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_EXIT );
2913 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_PAUSE_EXIT );
2914 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2915 }
2916 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls2));
2917 {
2918 uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
2919 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
2920 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
2921 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_DESC_TABLE_EXIT );
2922 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDTSCP );
2923 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_X2APIC_MODE );
2924 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VPID );
2925 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_WBINVD_EXIT );
2926 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_UNRESTRICTED_GUEST );
2927 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_APIC_REG_VIRT );
2928 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_INT_DELIVERY );
2929 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT );
2930 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDRAND_EXIT );
2931 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_INVPCID );
2932 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMFUNC );
2933 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMCS_SHADOWING );
2934 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLS_EXIT );
2935 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDSEED_EXIT );
2936 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PML );
2937 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT_VE );
2938 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
2939 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_XSAVES_XRSTORS );
2940 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
2941 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_SPPTP_EPT );
2942 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PT_EPT );
2943 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
2944 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_USER_WAIT_PAUSE );
2945 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLV_EXIT );
2946 }
2947 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", idCpu, pVmcsInfo->u32EntryCtls));
2948 {
2949 uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
2950 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
2951 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
2952 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_ENTRY_TO_SMM );
2953 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
2954 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PERF_MSR );
2955 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PAT_MSR );
2956 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
2957 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR );
2958 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
2959 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR );
2960 }
2961 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", idCpu, pVmcsInfo->u32ExitCtls));
2962 {
2963 uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
2964 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
2965 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
2966 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PERF_MSR );
2967 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_ACK_EXT_INT );
2968 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PAT_MSR );
2969 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PAT_MSR );
2970 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_EFER_MSR );
2971 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_EFER_MSR );
2972 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER );
2973 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR );
2974 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT );
2975 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR );
2976 }
2977}
2978#endif
2979
2980
2981/**
2982 * Checks a fatal VT-x/AMD-V error and produces a meaningful
2983 * release log message.
2984 *
2985 * @param pVM The cross context VM structure.
2986 * @param iStatusCode VBox status code.
2987 */
2988VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
2989{
2990 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2991 {
2992 /** @todo r=ramshankar: Are all EMTs out of ring-0 at this point!? If not, we
2993 * might be getting inaccurate values for non-guru'ing EMTs. */
2994 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2995#ifdef TODO_9217_VMCSINFO
2996 PCVMXVMCSINFOSHARED pVmcsInfo = hmGetVmxActiveVmcsInfoShared(pVCpu);
2997#endif
2998 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
2999 switch (iStatusCode)
3000 {
3001 case VERR_VMX_INVALID_VMCS_PTR:
3002 {
3003 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
3004 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
3005#ifdef TODO_9217_VMCSINFO
3006 LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", idCpu, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
3007 pVmcsInfo->HCPhysVmcs));
3008#endif
3009 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
3010 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3011 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3012 break;
3013 }
3014
3015 case VERR_VMX_UNABLE_TO_START_VM:
3016 {
3017 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
3018 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
3019 LogRel(("HM: CPU[%u] Instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
3020 LogRel(("HM: CPU[%u] Exit reason %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32ExitReason));
3021
3022 if ( pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
3023 || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
3024 {
3025 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3026 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3027 }
3028 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
3029 {
3030#ifdef TODO_9217_VMCSINFO
3031 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3032 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
3033 LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrLoad));
3034 LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrStore));
3035 LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysHostMsrLoad));
3036 LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", idCpu, pVmcsInfo->cEntryMsrLoad));
3037 LogRel(("HM: CPU[%u] cExitMsrStore %u\n", idCpu, pVmcsInfo->cExitMsrStore));
3038 LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", idCpu, pVmcsInfo->cExitMsrLoad));
3039#endif
3040 }
3041 /** @todo Log VM-entry event injection control fields
3042 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
3043 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
3044 break;
3045 }
3046
3047 case VERR_VMX_INVALID_GUEST_STATE:
3048 {
3049 LogRel(("HM: VERR_VMX_INVALID_GUEST_STATE:\n"));
3050 LogRel(("HM: CPU[%u] HM error = %#RX32\n", idCpu, pVCpu->hm.s.u32HMError));
3051 LogRel(("HM: CPU[%u] Guest-intr. state = %#RX32\n", idCpu, pVCpu->hm.s.vmx.LastError.u32GuestIntrState));
3052#ifdef TODO_9217_VMCSINFO
3053 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3054#endif
3055 break;
3056 }
3057
3058 /* The guru will dump the HM error and exit history. Nothing extra to report for these errors. */
3059 case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
3060 case VERR_VMX_INVALID_VMXON_PTR:
3061 case VERR_VMX_UNEXPECTED_EXIT:
3062 case VERR_VMX_INVALID_VMCS_FIELD:
3063 case VERR_SVM_UNKNOWN_EXIT:
3064 case VERR_SVM_UNEXPECTED_EXIT:
3065 case VERR_SVM_UNEXPECTED_PATCH_TYPE:
3066 case VERR_SVM_UNEXPECTED_XCPT_EXIT:
3067 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
3068 break;
3069 }
3070 }
3071
3072 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
3073 {
3074 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.vmx.MsrsForRing3.EntryCtls.n.allowed1));
3075 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.vmx.MsrsForRing3.EntryCtls.n.allowed0));
3076 }
3077 else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
3078 LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.vmx.HCPhysVmxEnableError));
3079}
3080
3081
3082/**
3083 * Execute state save operation.
3084 *
3085 * Save only data that cannot be re-loaded while entering HM ring-0 code. This
3086 * is because we always save the VM state from ring-3 and thus most HM state
3087 * will be re-synced dynamically at runtime and doesn't need to be part of the VM
3088 * saved state.
3089 *
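 * Unit layout (matching the puts below): per-VCPU SVM nested-guest VMCB
 * cache fields (only when SVM is exposed to the guest), followed by the
 * TPR patching state: guest patch memory pointers and size, the patch
 * count and the individual patch records.
 *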
3090 * @returns VBox status code.
3091 * @param pVM The cross context VM structure.
3092 * @param pSSM SSM operation handle.
3093 */
3094static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3095{
3096 Log(("hmR3Save:\n"));
3097
3098 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3099 {
3100 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3101 Assert(!pVCpu->hm.s.Event.fPending);
3102 if (pVM->cpum.ro.GuestFeatures.fSvm)
3103 {
3104 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3105 SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
3106 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
3107 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
3108 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
3109 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
3110 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
3111 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
3112 SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
3113 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
3114 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
3115 SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
3116 SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
3117 SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
3118 }
3119 }
3120
3121 /* Save the guest patch data. */
3122 SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3123 SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3124 SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3125
3126 /* Store all the guest patch records too. */
3127 int rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3128 if (RT_FAILURE(rc))
3129 return rc;
3130
3131 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3132 {
3133 AssertCompileSize(HMTPRINSTR, 4);
3134 PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3135 SSMR3PutU32(pSSM, pPatch->Core.Key);
3136 SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3137 SSMR3PutU32(pSSM, pPatch->cbOp);
3138 SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3139 SSMR3PutU32(pSSM, pPatch->cbNewOp);
3140 SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
3141 SSMR3PutU32(pSSM, pPatch->uSrcOperand);
3142 SSMR3PutU32(pSSM, pPatch->uDstOperand);
3143 SSMR3PutU32(pSSM, pPatch->pJumpTarget);
3144 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
3145 if (RT_FAILURE(rc))
3146 return rc;
3147 }
3148
3149 return VINF_SUCCESS;
3150}
3151
3152
3153/**
3154 * Execute state load operation.
3155 *
3156 * @returns VBox status code.
3157 * @param pVM The cross context VM structure.
3158 * @param pSSM SSM operation handle.
3159 * @param uVersion Data layout version.
3160 * @param uPass The data pass.
3161 */
3162static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3163{
3164 int rc;
3165
3166 LogFlowFunc(("uVersion=%u\n", uVersion));
3167 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
3168
3169 /*
3170 * Validate version.
3171 */
3172 if ( uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
3173 && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
3174 && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
3175 && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
3176 {
3177 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
3178 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3179 }
3180
3181 /*
3182 * Load per-VCPU state.
3183 */
3184 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3185 {
3186 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3187 if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
3188 {
3189 /* Load the SVM nested hw.virt state if the VM is configured for it. */
3190 if (pVM->cpum.ro.GuestFeatures.fSvm)
3191 {
3192 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3193 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
3194 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
3195 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
3196 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
3197 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
3198 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
3199 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
3200 SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
3201 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
3202 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
3203 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
3204 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
3205 rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
3206 AssertRCReturn(rc, rc);
3207 }
3208 }
3209 else
3210 {
3211 /* Pending HM event (obsolete for a long time since TRPM holds the info.) */
3212 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.fPending);
3213 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.u32ErrCode);
3214 SSMR3GetU64(pSSM, &pVCpu->hm.s.Event.u64IntInfo);
3215
3216 /* VMX fWasInRealMode related data. */
3217 uint32_t uDummy;
3218 SSMR3GetU32(pSSM, &uDummy);
3219 SSMR3GetU32(pSSM, &uDummy);
3220 rc = SSMR3GetU32(pSSM, &uDummy);
3221 AssertRCReturn(rc, rc);
3222 }
3223 }
3224
3225 /*
3226 * Load TPR patching data.
3227 */
3228 if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
3229 {
3230 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3231 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3232 SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3233
3234 /* Fetch all TPR patch records. */
3235 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3236 AssertRCReturn(rc, rc);
3237 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3238 {
3239 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3240 SSMR3GetU32(pSSM, &pPatch->Core.Key);
3241 SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3242 SSMR3GetU32(pSSM, &pPatch->cbOp);
3243 SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3244 SSMR3GetU32(pSSM, &pPatch->cbNewOp);
3245 SSM_GET_ENUM32_RET(pSSM, pPatch->enmType, HMTPRINSTR);
3246
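 /* Reactivate TPR patching if any jump-replacement patch was restored. */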
3247 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
3248 pVM->hm.s.fTPRPatchingActive = true;
3249 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
3250
3251 SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
3252 SSMR3GetU32(pSSM, &pPatch->uDstOperand);
3253 SSMR3GetU32(pSSM, &pPatch->cFaults);
3254 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
3255 AssertRCReturn(rc, rc);
3256
3257 LogFlow(("hmR3Load: patch %d\n", i));
3258 LogFlow(("Key = %x\n", pPatch->Core.Key));
3259 LogFlow(("cbOp = %d\n", pPatch->cbOp));
3260 LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
3261 LogFlow(("type = %d\n", pPatch->enmType));
3262 LogFlow(("srcop = %d\n", pPatch->uSrcOperand));
3263 LogFlow(("dstop = %d\n", pPatch->uDstOperand));
3264 LogFlow(("cFaults = %d\n", pPatch->cFaults));
3265 LogFlow(("target = %x\n", pPatch->pJumpTarget));
3266
3267 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
3268 AssertRCReturn(rc, rc);
3269 }
3270 }
3271
3272 return VINF_SUCCESS;
3273}
3274
3275
3276/**
3277 * Displays HM info.
3278 *
3279 * @param pVM The cross context VM structure.
3280 * @param pHlp The info helper functions.
3281 * @param pszArgs Arguments, ignored.
3282 */
3283static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3284{
3285 NOREF(pszArgs);
3286 PVMCPU pVCpu = VMMGetCpu(pVM);
3287 if (!pVCpu)
3288 pVCpu = pVM->apCpusR3[0];
3289
3290 if (HMIsEnabled(pVM))
3291 {
3292 if (pVM->hm.s.vmx.fSupported)
3293 pHlp->pfnPrintf(pHlp, "CPU[%u]: VT-x info:\n", pVCpu->idCpu);
3294 else
3295 pHlp->pfnPrintf(pHlp, "CPU[%u]: AMD-V info:\n", pVCpu->idCpu);
3296 pHlp->pfnPrintf(pHlp, " HM error = %#x (%u)\n", pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError);
3297 pHlp->pfnPrintf(pHlp, " rcLastExitToR3 = %Rrc\n", pVCpu->hm.s.rcLastExitToR3);
3298 if (pVM->hm.s.vmx.fSupported)
3299 {
3300 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3301 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3302 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
3303
3304 pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
3305 pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active);
3306 if (fRealOnV86Active)
3307 {
3308 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfoShared->RealMode.Eflags.u32);
3309 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfoShared->RealMode.AttrCS.u);
3310 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfoShared->RealMode.AttrSS.u);
3311 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfoShared->RealMode.AttrDS.u);
3312 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfoShared->RealMode.AttrES.u);
3313 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfoShared->RealMode.AttrFS.u);
3314 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfoShared->RealMode.AttrGS.u);
3315 }
3316 }
3317 }
3318 else
3319 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3320}
3321
3322
3323/**
3324 * Displays the HM Last-Branch-Record info. for the guest.
3325 *
3326 * @param pVM The cross context VM structure.
3327 * @param pHlp The info helper functions.
3328 * @param pszArgs Arguments, ignored.
3329 */
3330static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3331{
3332 NOREF(pszArgs);
3333 PVMCPU pVCpu = VMMGetCpu(pVM);
3334 if (!pVCpu)
3335 pVCpu = pVM->apCpusR3[0];
3336
3337 if (!HMIsEnabled(pVM))
3338 {
 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
 return; /* Don't fall through and assert SVM-active below when HM is disabled. */
 }
3339
3340 if (HMIsVmxActive(pVM))
3341 {
3342 if (pVM->hm.s.vmx.fLbr)
3343 {
3344 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3345 uint32_t const cLbrStack = pVM->hm.s.vmx.idLbrFromIpMsrLast - pVM->hm.s.vmx.idLbrFromIpMsrFirst + 1;
3346
3347 /** @todo r=ramshankar: The index technically varies depending on the CPU, but
3348 * 0xf should cover everything we support thus far. Fix if necessary
3349 * later. */
3350 uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
3351 if (idxTopOfStack >= cLbrStack)
3352 {
3353 pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
3354 idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
3355 return;
3356 }
3357
3358 /*
3359 * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
3360 */
3361 pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
3362 uint32_t idxCurrent = idxTopOfStack;
3363 Assert(idxTopOfStack < cLbrStack);
3364 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
3365 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
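 /* Walk the ring backwards from the most recent record until we arrive
 back at the top-of-stack index. */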
3366 for (;;)
3367 {
3368 if (pVM->hm.s.vmx.idLbrToIpMsrFirst)
3369 {
3370 pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
3371 pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
3372 }
3373 else
3374 pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
3375
3376 idxCurrent = (idxCurrent + cLbrStack - 1) % cLbrStack; /* Step backwards with unsigned wrap-around; correct even if cLbrStack isn't a power of two. */
3377 if (idxCurrent == idxTopOfStack)
3378 break;
3379 }
3380 }
3381 else
3382 pHlp->pfnPrintf(pHlp, "VM not configured to record LBRs for the guest\n");
3383 }
3384 else
3385 {
3386 Assert(HMIsSvmActive(pVM));
3387 /** @todo SVM: LBRs (get them from VMCB if possible). */
3388 pHlp->pfnPrintf(pHlp, "SVM LBR not implemented in VM debugger yet\n");
3389 }
3390}
3391
3392
3393/**
3394 * Displays the HM pending event.
3395 *
3396 * @param pVM The cross context VM structure.
3397 * @param pHlp The info helper functions.
3398 * @param pszArgs Arguments, ignored.
3399 */
3400static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3401{
3402 NOREF(pszArgs);
3403 PVMCPU pVCpu = VMMGetCpu(pVM);
3404 if (!pVCpu)
3405 pVCpu = pVM->apCpusR3[0];
3406
3407 if (HMIsEnabled(pVM))
3408 {
3409 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM event (fPending=%RTbool)\n", pVCpu->idCpu, pVCpu->hm.s.Event.fPending);
3410 if (pVCpu->hm.s.Event.fPending)
3411 {
3412 pHlp->pfnPrintf(pHlp, " u64IntInfo = %#RX64\n", pVCpu->hm.s.Event.u64IntInfo);
3413 pHlp->pfnPrintf(pHlp, " u32ErrCode = %#RX32\n", pVCpu->hm.s.Event.u32ErrCode);
3414 pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", pVCpu->hm.s.Event.cbInstr);
3415 pHlp->pfnPrintf(pHlp, " GCPtrFaultAddress = %#RGp\n", pVCpu->hm.s.Event.GCPtrFaultAddress);
3416 }
3417 }
3418 else
3419 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3420}
3421
3422
3423/**
3424 * Displays the SVM nested-guest VMCB cache.
3425 *
3426 * @param pVM The cross context VM structure.
3427 * @param pHlp The info helper functions.
3428 * @param pszArgs Arguments, ignored.
3429 */
3430static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3431{
3432 NOREF(pszArgs);
3433 PVMCPU pVCpu = VMMGetCpu(pVM);
3434 if (!pVCpu)
3435 pVCpu = pVM->apCpusR3[0];
3436
3437 bool const fSvmEnabled = HMR3IsSvmEnabled(pVM->pUVM);
3438 if ( fSvmEnabled
3439 && pVM->cpum.ro.GuestFeatures.fSvm)
3440 {
3441 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3442 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
3443 pHlp->pfnPrintf(pHlp, " fCacheValid = %RTbool\n", pVmcbNstGstCache->fCacheValid);
3444 pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx);
3445 pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx);
3446 pHlp->pfnPrintf(pHlp, " u16InterceptRdDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdDRx);
3447 pHlp->pfnPrintf(pHlp, " u16InterceptWrDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrDRx);
3448 pHlp->pfnPrintf(pHlp, " u16PauseFilterThreshold = %#RX16\n", pVmcbNstGstCache->u16PauseFilterThreshold);
3449 pHlp->pfnPrintf(pHlp, " u16PauseFilterCount = %#RX16\n", pVmcbNstGstCache->u16PauseFilterCount);
3450 pHlp->pfnPrintf(pHlp, " u32InterceptXcpt = %#RX32\n", pVmcbNstGstCache->u32InterceptXcpt);
3451 pHlp->pfnPrintf(pHlp, " u64InterceptCtrl = %#RX64\n", pVmcbNstGstCache->u64InterceptCtrl);
3452 pHlp->pfnPrintf(pHlp, " u64TSCOffset = %#RX64\n", pVmcbNstGstCache->u64TSCOffset);
3453 pHlp->pfnPrintf(pHlp, " fVIntrMasking = %RTbool\n", pVmcbNstGstCache->fVIntrMasking);
3454 pHlp->pfnPrintf(pHlp, " fNestedPaging = %RTbool\n", pVmcbNstGstCache->fNestedPaging);
3455 pHlp->pfnPrintf(pHlp, " fLbrVirt = %RTbool\n", pVmcbNstGstCache->fLbrVirt);
3456 }
3457 else
3458 {
3459 if (!fSvmEnabled)
3460 pHlp->pfnPrintf(pHlp, "HM SVM is not enabled for this VM!\n");
3461 else
3462 pHlp->pfnPrintf(pHlp, "SVM feature is not exposed to the guest!\n");
3463 }
3464}
3465