VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@93554

Last change on this file was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

1/* $Id: HM.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * HM - Intel/AMD VM Hardware Support Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_hm HM - Hardware Assisted Virtualization Manager
19 *
20 * The HM manages guest execution using the VT-x and AMD-V CPU hardware
21 * extensions.
22 *
23 * {summary of what HM does}
24 *
25 * Hardware-assisted virtualization manager was originally abbreviated HWACCM;
26 * however, that was cumbersome to write and parse for such a central component,
27 * so it was shortened to HM when refactoring the code in the 4.3 development
28 * cycle.
29 *
30 * {add sections with more details}
31 *
32 * @sa @ref grp_hm
33 */
34
35
36/*********************************************************************************************************************************
37* Header Files *
38*********************************************************************************************************************************/
39#define LOG_GROUP LOG_GROUP_HM
40#define VMCPU_INCL_CPUM_GST_CTX
41#include <VBox/vmm/cpum.h>
42#include <VBox/vmm/stam.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/pdmapi.h>
45#include <VBox/vmm/pgm.h>
46#include <VBox/vmm/ssm.h>
47#include <VBox/vmm/gim.h>
48#include <VBox/vmm/trpm.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/iom.h>
51#include <VBox/vmm/iem.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/nem.h>
54#include <VBox/vmm/hm_vmx.h>
55#include <VBox/vmm/hm_svm.h>
56#include "HMInternal.h"
57#include <VBox/vmm/vmcc.h>
58#include <VBox/err.h>
59#include <VBox/param.h>
60
61#include <iprt/assert.h>
62#include <VBox/log.h>
63#include <iprt/asm.h>
64#include <iprt/asm-amd64-x86.h>
65#include <iprt/env.h>
66#include <iprt/thread.h>
67
68
69/*********************************************************************************************************************************
70* Defined Constants And Macros *
71*********************************************************************************************************************************/
72/** @def HMVMX_REPORT_FEAT
73 * Reports VT-x feature to the release log.
74 *
75 * @param a_uAllowed1 Mask of allowed-1 feature bits.
76 * @param a_uAllowed0 Mask of allowed-0 feature bits.
77 * @param a_StrDesc The description string to report.
78 * @param a_Featflag Mask of the feature to report.
79 */
80#define HMVMX_REPORT_FEAT(a_uAllowed1, a_uAllowed0, a_StrDesc, a_Featflag) \
81 do { \
82 if ((a_uAllowed1) & (a_Featflag)) \
83 { \
84 if ((a_uAllowed0) & (a_Featflag)) \
85 LogRel(("HM: " a_StrDesc " (must be set)\n")); \
86 else \
87 LogRel(("HM: " a_StrDesc "\n")); \
88 } \
89 else \
90 LogRel(("HM: " a_StrDesc " (must be cleared)\n")); \
91 } while (0)
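/*
 * For example, the pin-based controls reporting further down in this file feeds the
 * allowed-1/allowed-0 halves of MSR_IA32_VMX_PINBASED_CTLS through this macro:
 *
 *     uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
 *     uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
 *     HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
 *
 * A feature bit that may be either 0 or 1 is logged plainly, one that is fixed to 1 is
 * logged with "(must be set)", and one fixed to 0 with "(must be cleared)".
 */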
92
93/** @def HMVMX_REPORT_ALLOWED_FEAT
94 * Reports an allowed VT-x feature to the release log.
95 *
96 * @param a_uAllowed1 Mask of allowed-1 feature bits.
97 * @param a_StrDesc The description string to report.
98 * @param a_FeatFlag Mask of the feature to report.
99 */
100#define HMVMX_REPORT_ALLOWED_FEAT(a_uAllowed1, a_StrDesc, a_FeatFlag) \
101 do { \
102 if ((a_uAllowed1) & (a_FeatFlag)) \
103 LogRel(("HM: " a_StrDesc "\n")); \
104 else \
105 LogRel(("HM: " a_StrDesc " not supported\n")); \
106 } while (0)
107
108/** @def HMVMX_REPORT_MSR_CAP
109 * Reports MSR feature capability.
110 *
111 * @param a_MsrCaps Mask of MSR feature bits.
112 * @param a_StrDesc The description string to report.
113 * @param a_fCap Mask of the feature to report.
114 */
115#define HMVMX_REPORT_MSR_CAP(a_MsrCaps, a_StrDesc, a_fCap) \
116 do { \
117 if ((a_MsrCaps) & (a_fCap)) \
118 LogRel(("HM: " a_StrDesc "\n")); \
119 } while (0)
120
121/** @def HMVMX_LOGREL_FEAT
122 * Dumps a feature flag from a bitmap of features to the release log.
123 *
124 * @param a_fVal The value of all the features.
125 * @param a_fMask The specific bitmask of the feature.
126 */
127#define HMVMX_LOGREL_FEAT(a_fVal, a_fMask) \
128 do { \
129 if ((a_fVal) & (a_fMask)) \
130 LogRel(("HM: %s\n", #a_fMask)); \
131 } while (0)
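/*
 * Since the macro stringifies its mask argument, an invocation such as
 * HMVMX_LOGREL_FEAT(fFeatures, VMX_PROC_CTLS_USE_TSC_OFFSETTING) logs
 * "HM: VMX_PROC_CTLS_USE_TSC_OFFSETTING" when that bit is set in fFeatures
 * (illustrative names; the real call sites pass the feature bitmaps gathered
 * from the VMX capability MSRs).
 */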
132
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
138static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
139static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
140static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
141static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
142static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
143static int hmR3InitFinalizeR3(PVM pVM);
144static int hmR3InitFinalizeR0(PVM pVM);
145static int hmR3InitFinalizeR0Intel(PVM pVM);
146static int hmR3InitFinalizeR0Amd(PVM pVM);
147static int hmR3TermCPU(PVM pVM);
148
149
150#ifdef VBOX_WITH_STATISTICS
151/**
152 * Returns the name of the hardware exception.
153 *
154 * @returns The name of the hardware exception.
155 * @param uVector The exception vector.
156 */
157static const char *hmR3GetXcptName(uint8_t uVector)
158{
159 switch (uVector)
160 {
161 case X86_XCPT_DE: return "#DE";
162 case X86_XCPT_DB: return "#DB";
163 case X86_XCPT_NMI: return "#NMI";
164 case X86_XCPT_BP: return "#BP";
165 case X86_XCPT_OF: return "#OF";
166 case X86_XCPT_BR: return "#BR";
167 case X86_XCPT_UD: return "#UD";
168 case X86_XCPT_NM: return "#NM";
169 case X86_XCPT_DF: return "#DF";
170 case X86_XCPT_CO_SEG_OVERRUN: return "#CO_SEG_OVERRUN";
171 case X86_XCPT_TS: return "#TS";
172 case X86_XCPT_NP: return "#NP";
173 case X86_XCPT_SS: return "#SS";
174 case X86_XCPT_GP: return "#GP";
175 case X86_XCPT_PF: return "#PF";
176 case X86_XCPT_MF: return "#MF";
177 case X86_XCPT_AC: return "#AC";
178 case X86_XCPT_MC: return "#MC";
179 case X86_XCPT_XF: return "#XF";
180 case X86_XCPT_VE: return "#VE";
181 case X86_XCPT_CP: return "#CP";
182 case X86_XCPT_VC: return "#VC";
183 case X86_XCPT_SX: return "#SX";
184 }
185 return "Reserved";
186}
187#endif /* VBOX_WITH_STATISTICS */
188
189
190/**
191 * Initializes the HM.
192 *
193 * This is the very first component to really do init after CFGM so that we can
194 * establish the predominant execution engine for the VM prior to initializing
195 * other modules. It takes care of NEM initialization if needed (HM disabled or
196 * not available in HW).
197 *
198 * If VT-x or AMD-V hardware isn't available, HM will try to fall back on a native
199 * hypervisor API via NEM, and then back on raw-mode if that isn't available
200 * either. The fallback to raw-mode will not happen if /HM/HMForced is set
201 * (like for guests using SMP or 64-bit, as well as for complicated guests like
202 * OS X, OS/2 and others).
203 *
204 * Note that a lot of the setup work is done in ring-0 and thus postponed till
205 * the ring-3 and ring-0 callbacks to HMR3InitCompleted.
206 *
207 * @returns VBox status code.
208 * @param pVM The cross context VM structure.
209 *
210 * @remarks Be careful with what we call here, since most of the VMM components
211 * are uninitialized.
212 */
213VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
214{
215 LogFlowFunc(("\n"));
216
217 /*
218 * Assert alignment and sizes.
219 */
220 AssertCompileMemberAlignment(VM, hm.s, 32);
221 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
222
223 /*
224 * Register the saved state data unit.
225 */
226 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
227 NULL, NULL, NULL,
228 NULL, hmR3Save, NULL,
229 NULL, hmR3Load, NULL);
230 if (RT_FAILURE(rc))
231 return rc;
232
233 /*
234 * Register info handlers.
235 */
236 rc = DBGFR3InfoRegisterInternalEx(pVM, "hm", "Dumps HM info.", hmR3Info, DBGFINFO_FLAGS_ALL_EMTS);
237 AssertRCReturn(rc, rc);
238
239 rc = DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending,
240 DBGFINFO_FLAGS_ALL_EMTS);
241 AssertRCReturn(rc, rc);
242
243 rc = DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache", "Dumps the HM SVM nested-guest VMCB cache.",
244 hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
245 AssertRCReturn(rc, rc);
246
247 rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the HM LBR info.", hmR3InfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
248 AssertRCReturn(rc, rc);
249
250 /*
251 * Read configuration.
252 */
253 PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
254
255 /*
256 * Validate the HM settings.
257 */
258 rc = CFGMR3ValidateConfig(pCfgHm, "/HM/",
259 "HMForced" /* implied 'true' these days */
260 "|UseNEMInstead"
261 "|FallbackToNEM"
262 "|EnableNestedPaging"
263 "|EnableUX"
264 "|EnableLargePages"
265 "|EnableVPID"
266 "|IBPBOnVMExit"
267 "|IBPBOnVMEntry"
268 "|SpecCtrlByHost"
269 "|L1DFlushOnSched"
270 "|L1DFlushOnVMEntry"
271 "|MDSClearOnSched"
272 "|MDSClearOnVMEntry"
273 "|TPRPatchingEnabled"
274 "|64bitEnabled"
275 "|Exclusive"
276 "|MaxResumeLoops"
277 "|VmxPleGap"
278 "|VmxPleWindow"
279 "|VmxLbr"
280 "|UseVmxPreemptTimer"
281 "|SvmPauseFilter"
282 "|SvmPauseFilterThreshold"
283 "|SvmVirtVmsaveVmload"
284 "|SvmVGif"
285 "|LovelyMesaDrvWorkaround"
286 "|MissingOS2TlbFlushWorkaround",
287 "" /* pszValidNodes */, "HM" /* pszWho */, 0 /* uInstance */);
288 if (RT_FAILURE(rc))
289 return rc;
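/*
 * All of these keys live under the VM's /HM/ CFGM node. As a usage sketch (the extradata
 * syntax below is assumed from the general VBoxInternal/ -> CFGM mapping, not from this
 * file), a key would typically be set from the command line like:
 *
 *     VBoxManage setextradata "MyVM" "VBoxInternal/HM/UseNEMInstead" 1
 *
 * and is then picked up by the CFGMR3Query*Def() calls below instead of their hard-coded
 * defaults.
 */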
290
291 /** @cfgm{/HM/HMForced, bool, false}
292 * Forces hardware virtualization, no falling back on raw-mode. HM must be
293 * enabled, i.e. /HMEnabled must be true. */
294 bool fHMForced;
295 AssertRelease(pVM->fHMEnabled);
296 fHMForced = true;
297
298 /** @cfgm{/HM/UseNEMInstead, bool, false}
299 * Don't use HM, use NEM instead. */
300 bool fUseNEMInstead = false;
301 rc = CFGMR3QueryBoolDef(pCfgHm, "UseNEMInstead", &fUseNEMInstead, false);
302 AssertRCReturn(rc, rc);
303 if (fUseNEMInstead && pVM->fHMEnabled)
304 {
305 LogRel(("HM: Setting fHMEnabled to false because fUseNEMInstead is set.\n"));
306 pVM->fHMEnabled = false;
307 }
308
309 /** @cfgm{/HM/FallbackToNEM, bool, true}
310 * Enables fallback on NEM. */
311 bool fFallbackToNEM = true;
312 rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToNEM", &fFallbackToNEM, true);
313 AssertRCReturn(rc, rc);
314
315 /** @cfgm{/HM/EnableNestedPaging, bool, false}
316 * Enables nested paging (aka extended page tables). */
317 bool fAllowNestedPaging = false;
318 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableNestedPaging", &fAllowNestedPaging, false);
319 AssertRCReturn(rc, rc);
320
321 /** @cfgm{/HM/EnableUX, bool, true}
322 * Enables the VT-x unrestricted execution feature. */
323 bool fAllowUnrestricted = true;
324 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableUX", &fAllowUnrestricted, true);
325 AssertRCReturn(rc, rc);
326
327 /** @cfgm{/HM/EnableLargePages, bool, false}
328 * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
329 * page table walking and maybe improving the TLB hit rate in some cases. */
330 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableLargePages", &pVM->hm.s.fLargePages, false);
331 AssertRCReturn(rc, rc);
332
333 /** @cfgm{/HM/EnableVPID, bool, false}
334 * Enables the VT-x VPID feature. */
335 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
336 AssertRCReturn(rc, rc);
337
338 /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
339 * Enables TPR patching for 32-bit windows guests with IO-APIC. */
340 rc = CFGMR3QueryBoolDef(pCfgHm, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
341 AssertRCReturn(rc, rc);
342
343 /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
344 * Enables AMD64 CPU features.
345 * On 32-bit hosts this isn't the default and requires host CPU support; 64-bit hosts
346 * already have the support.
347#ifdef VBOX_WITH_64_BITS_GUESTS
348 rc = CFGMR3QueryBoolDef(pCfgHm, "64bitEnabled", &pVM->hm.s.fAllow64BitGuestsCfg, HC_ARCH_BITS == 64);
349 AssertLogRelRCReturn(rc, rc);
350#else
351 pVM->hm.s.fAllow64BitGuestsCfg = false;
352#endif
353
354 /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
355 * The pause-filter exiting gap in TSC ticks. When the number of ticks between
356 * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
357 * latest PAUSE instruction to be the start of a new PAUSE loop.
358 */
359 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
360 AssertRCReturn(rc, rc);
361
362 /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
363 * The pause-filter exiting window in TSC ticks. When the number of ticks
364 * between the current PAUSE instruction and first PAUSE of a loop exceeds
365 * VmxPleWindow, a VM-exit is triggered.
366 *
367 * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
368 */
369 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
370 AssertRCReturn(rc, rc);
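/*
 * Worked example with purely illustrative values: with VmxPleGap=128 and
 * VmxPleWindow=65536, two PAUSEs executed within 128 TSC ticks of each other are treated
 * as part of the same spin loop, and once more than 65536 ticks have elapsed since the
 * first PAUSE of that loop, the CPU triggers a VM-exit instead of letting the guest keep
 * spinning.
 */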
371
372 /** @cfgm{/HM/VmxLbr, bool, false}
373 * Whether to enable LBR for the guest. This is disabled by default as it's only
374 * useful while debugging and enabling it causes a noticeable performance hit. */
375 rc = CFGMR3QueryBoolDef(pCfgHm, "VmxLbr", &pVM->hm.s.vmx.fLbrCfg, false);
376 AssertRCReturn(rc, rc);
377
378 /** @cfgm{/HM/SvmPauseFilter, uint16_t, 0}
379 * A counter that is decremented each time a PAUSE instruction is executed by the
380 * guest. When the counter is 0, a \#VMEXIT is triggered.
381 *
382 * Setting SvmPauseFilter to 0 disables pause-filter exiting.
383 */
384 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilter", &pVM->hm.s.svm.cPauseFilter, 0);
385 AssertRCReturn(rc, rc);
386
387 /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
388 * The pause filter threshold in ticks. When the elapsed time (in ticks) between
389 * two successive PAUSE instructions exceeds SvmPauseFilterThreshold, the
390 * PauseFilter count is reset to its initial value. However, if PAUSE is
391 * executed PauseFilter times within PauseFilterThreshold ticks, a VM-exit will
392 * be triggered.
393 *
394 * Requires SvmPauseFilter to be non-zero for the pause-filter threshold to be
395 * activated.
396 */
397 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
398 AssertRCReturn(rc, rc);
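/*
 * Worked example with purely illustrative values: with SvmPauseFilter=3000 and
 * SvmPauseFilterThreshold=1000, every guest PAUSE decrements the filter count, but the
 * count is reset to 3000 whenever more than 1000 ticks pass between two successive
 * PAUSEs; only a tight loop that issues 3000 PAUSEs without ever exceeding that gap runs
 * the counter down to zero and causes a \#VMEXIT.
 */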
399
400 /** @cfgm{/HM/SvmVirtVmsaveVmload, bool, true}
401 * Whether to make use of the virtualized VMSAVE/VMLOAD feature of the CPU if it's
402 * available. */
403 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVirtVmsaveVmload", &pVM->hm.s.svm.fVirtVmsaveVmload, true);
404 AssertRCReturn(rc, rc);
405
406 /** @cfgm{/HM/SvmVGif, bool, true}
407 * Whether to make use of the Virtual GIF (Global Interrupt Flag) feature of the CPU
408 * if it's available. */
409 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVGif", &pVM->hm.s.svm.fVGif, true);
410 AssertRCReturn(rc, rc);
411
412 /** @cfgm{/HM/SvmLbrVirt, bool, false}
413 * Whether to make use of the LBR virtualization feature of the CPU if it's
414 * available. This is disabled by default as it's only useful while debugging
415 * and enabling it causes a small hit to performance. */
416 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmLbrVirt", &pVM->hm.s.svm.fLbrVirt, false);
417 AssertRCReturn(rc, rc);
418
419 /** @cfgm{/HM/Exclusive, bool}
420 * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
421 * global init for each host CPU. If false, we do local init each time we wish
422 * to execute guest code.
423 *
424 * On Windows, default is false due to the higher risk of conflicts with other
425 * hypervisors.
426 *
427 * On Mac OS X, this setting is ignored since the code does not handle local
428 * init when it utilizes the OS provided VT-x function, SUPR0EnableVTx().
429 */
430#if defined(RT_OS_DARWIN)
431 pVM->hm.s.fGlobalInit = true;
432#else
433 rc = CFGMR3QueryBoolDef(pCfgHm, "Exclusive", &pVM->hm.s.fGlobalInit,
434# if defined(RT_OS_WINDOWS)
435 false
436# else
437 true
438# endif
439 );
440 AssertLogRelRCReturn(rc, rc);
441#endif
442
443 /** @cfgm{/HM/MaxResumeLoops, uint32_t}
444 * The number of times to resume guest execution before we forcibly return to
445 * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
446 * determines the default value. */
447 rc = CFGMR3QueryU32Def(pCfgHm, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoopsCfg, 0 /* set by R0 later */);
448 AssertLogRelRCReturn(rc, rc);
449
450 /** @cfgm{/HM/UseVmxPreemptTimer, bool}
451 * Whether to make use of the VMX-preemption timer feature of the CPU if it's
452 * available. */
453 rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimerCfg, true);
454 AssertLogRelRCReturn(rc, rc);
455
456 /** @cfgm{/HM/IBPBOnVMExit, bool}
457 * Costly paranoia setting. */
458 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
459 AssertLogRelRCReturn(rc, rc);
460
461 /** @cfgm{/HM/IBPBOnVMEntry, bool}
462 * Costly paranoia setting. */
463 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
464 AssertLogRelRCReturn(rc, rc);
465
466 /** @cfgm{/HM/L1DFlushOnSched, bool, true}
467 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
468 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
469 AssertLogRelRCReturn(rc, rc);
470
471 /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
472 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
473 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
474 AssertLogRelRCReturn(rc, rc);
475
476 /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
477 if (pVM->hm.s.fL1dFlushOnVmEntry)
478 pVM->hm.s.fL1dFlushOnSched = false;
479
480 /** @cfgm{/HM/SpecCtrlByHost, bool}
481 * Another expensive paranoia setting. */
482 rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
483 AssertLogRelRCReturn(rc, rc);
484
485 /** @cfgm{/HM/MDSClearOnSched, bool, true}
486 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
487 * ignored on CPUs that aren't affected. */
488 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
489 AssertLogRelRCReturn(rc, rc);
490
491 /** @cfgm{/HM/MDSClearOnVmEntry, bool, false}
492 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
493 * ignored on CPUs that aren't affected. */
494 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVmEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
495 AssertLogRelRCReturn(rc, rc);
496
497 /* Disable MDSClearOnSched if MDSClearOnVmEntry is enabled. */
498 if (pVM->hm.s.fMdsClearOnVmEntry)
499 pVM->hm.s.fMdsClearOnSched = false;
500
501 /** @cfgm{/HM/LovelyMesaDrvWorkaround,bool}
502 * Workaround for mesa vmsvga 3d driver making incorrect assumptions about
503 * the hypervisor it is running under. */
504 bool fMesaWorkaround;
505 rc = CFGMR3QueryBoolDef(pCfgHm, "LovelyMesaDrvWorkaround", &fMesaWorkaround, false);
506 AssertLogRelRCReturn(rc, rc);
507 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
508 {
509 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
510 pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv = fMesaWorkaround;
511 }
512
513 /** @cfgm{/HM/MissingOS2TlbFlushWorkaround,bool}
514 * Workaround for OS/2 not flushing the TLB after page directory and page table
515 * modifications when returning to protected mode from a real mode call
516 * (TESTCFG.SYS typically crashes). See ticketref:20625 for details. */
517 rc = CFGMR3QueryBoolDef(pCfgHm, "MissingOS2TlbFlushWorkaround", &pVM->hm.s.fMissingOS2TlbFlushWorkaround, false);
518 AssertLogRelRCReturn(rc, rc);
519
520 /*
521 * Check for VT-x or AMD-V support according to the user's wishes.
522 */
523 /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
524 * VERR_SVM_IN_USE. */
525 if (pVM->fHMEnabled)
526 {
527 uint32_t fCaps;
528 rc = SUPR3QueryVTCaps(&fCaps);
529 if (RT_SUCCESS(rc))
530 {
531 if (fCaps & SUPVTCAPS_AMD_V)
532 {
533 pVM->hm.s.svm.fSupported = true;
534 LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
535 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
536 }
537 else if (fCaps & SUPVTCAPS_VT_X)
538 {
539 const char *pszWhy;
540 rc = SUPR3QueryVTxSupported(&pszWhy);
541 if (RT_SUCCESS(rc))
542 {
543 pVM->hm.s.vmx.fSupported = true;
544 LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
545 fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
546 fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST ? " and unrestricted guest execution" : "",
547 (fCaps & (SUPVTCAPS_NESTED_PAGING | SUPVTCAPS_VTX_UNRESTRICTED_GUEST)) ? " hw support" : ""));
548 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
549 }
550 else
551 {
552 /*
553 * Before failing, try fallback to NEM if we're allowed to do that.
554 */
555 pVM->fHMEnabled = false;
556 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
557 if (fFallbackToNEM)
558 {
559 LogRel(("HM: HMR3Init: Attempting fall back to NEM: The host kernel does not support VT-x - %s\n", pszWhy));
560 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
561
562 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
563 if ( RT_SUCCESS(rc2)
564 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
565 rc = VINF_SUCCESS;
566 }
567 if (RT_FAILURE(rc))
568 return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x: %s\n", pszWhy);
569 }
570 }
571 else
572 AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
573 VERR_INTERNAL_ERROR_5);
574
575 /*
576 * Decide on nested paging and unrestricted guest execution now if they're
577 * configured and supported, so that CPUM can make decisions based on our configuration.
578 */
579 if ( fAllowNestedPaging
580 && (fCaps & SUPVTCAPS_NESTED_PAGING))
581 {
582 pVM->hm.s.fNestedPagingCfg = true;
583 if (fCaps & SUPVTCAPS_VT_X)
584 {
585 if ( fAllowUnrestricted
586 && (fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST))
587 pVM->hm.s.vmx.fUnrestrictedGuestCfg = true;
588 else
589 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
590 }
591 }
592 else
593 Assert(!pVM->hm.s.fNestedPagingCfg);
594 }
595 else
596 {
597 const char *pszMsg;
598 switch (rc)
599 {
600 case VERR_UNSUPPORTED_CPU: pszMsg = "Unknown CPU, VT-x or AMD-v features cannot be ascertained"; break;
601 case VERR_VMX_NO_VMX: pszMsg = "VT-x is not available"; break;
602 case VERR_VMX_MSR_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS"; break;
603 case VERR_VMX_MSR_ALL_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS for all CPU modes"; break;
604 case VERR_VMX_MSR_LOCKING_FAILED: pszMsg = "Failed to enable and lock VT-x features"; break;
605 case VERR_SVM_NO_SVM: pszMsg = "AMD-V is not available"; break;
606 case VERR_SVM_DISABLED: pszMsg = "AMD-V is disabled in the BIOS (or by the host OS)"; break;
607 case VERR_SUP_DRIVERLESS: pszMsg = "Driverless mode"; break;
608 default:
609 return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
610 }
611
612 /*
613 * Before failing, try fallback to NEM if we're allowed to do that.
614 */
615 pVM->fHMEnabled = false;
616 if (fFallbackToNEM)
617 {
618 LogRel(("HM: HMR3Init: Attempting fall back to NEM: %s\n", pszMsg));
619 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
620 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
621 if ( RT_SUCCESS(rc2)
622 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
623 {
624 rc = VINF_SUCCESS;
625
626 /* For some reason, HM is in charge of large pages. Make sure to enable them: */
627 PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
628 }
629 }
630 if (RT_FAILURE(rc))
631 return VM_SET_ERROR(pVM, rc, pszMsg);
632 }
633 }
634 else
635 {
636 /*
637 * Disabled HM means raw-mode, unless NEM is supposed to be used.
638 */
639 if (fUseNEMInstead)
640 {
641 rc = NEMR3Init(pVM, false /*fFallback*/, true);
642 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
643 if (RT_FAILURE(rc))
644 return rc;
645
646 /* For some reason, HM is in charge of large pages. Make sure to enable them: */
647 PGMSetLargePageUsage(pVM, pVM->hm.s.fLargePages);
648 }
649 if ( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET
650 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_RAW_MODE
651 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT /* paranoia */)
652 return VM_SET_ERROR(pVM, rc, "Misconfigured VM: No guest execution engine available!");
653 }
654
655 Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
656 Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_RAW_MODE);
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Initializes HM components after ring-3 phase has been fully initialized.
663 *
664 * @returns VBox status code.
665 * @param pVM The cross context VM structure.
666 */
667static int hmR3InitFinalizeR3(PVM pVM)
668{
669 LogFlowFunc(("\n"));
670
671 if (!HMIsEnabled(pVM))
672 return VINF_SUCCESS;
673
674 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
675 {
676 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
677 pVCpu->hm.s.fActive = false;
678 pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); /* Is safe to call now since GIMR3Init() has completed. */
679 }
680
681 /*
682 * Check if L1D flush is needed/possible.
683 */
684 if ( !pVM->cpum.ro.HostFeatures.fFlushCmd
685 || pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
686 || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
687 || pVM->cpum.ro.HostFeatures.fArchVmmNeedNotFlushL1d
688 || pVM->cpum.ro.HostFeatures.fArchRdclNo)
689 pVM->hm.s.fL1dFlushOnSched = pVM->hm.s.fL1dFlushOnVmEntry = false;
690
691 /*
692 * Check if MDS flush is needed/possible.
693 * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
694 */
695 if ( !pVM->cpum.ro.HostFeatures.fMdsClear
696 || pVM->cpum.ro.HostFeatures.fArchMdsNo)
697 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
698 else if ( ( pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
699 && pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Atom_End)
700 || ( pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
701 && pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Phi_End))
702 {
703 if (!pVM->hm.s.fMdsClearOnSched)
704 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
705 pVM->hm.s.fMdsClearOnVmEntry = false;
706 }
707 else if ( pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
708 || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
709 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
710
711 /*
712 * Statistics.
713 */
714#ifdef VBOX_WITH_STATISTICS
715 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
716 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
717 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessCr8, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessCR8", STAMUNIT_OCCURENCES, "Number of instruction replacements by MOV CR8.");
718 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessVmc, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessVMC", STAMUNIT_OCCURENCES, "Number of instruction replacements by VMMCALL.");
719 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
720#endif
721
722 bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
723 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
724 {
725 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
726 PHMCPU pHmCpu = &pVCpu->hm.s;
727 int rc;
728
729# define HM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
730 rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
731 AssertRC(rc); \
732 } while (0)
733# define HM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
734 HM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
735
736#ifdef VBOX_WITH_STATISTICS
737
738 HM_REG_PROFILE(&pHmCpu->StatPoke, "/PROF/CPU%u/HM/Poke", "Profiling of RTMpPokeCpu.");
739 HM_REG_PROFILE(&pHmCpu->StatSpinPoke, "/PROF/CPU%u/HM/PokeWait", "Profiling of poke wait.");
740 HM_REG_PROFILE(&pHmCpu->StatSpinPokeFailed, "/PROF/CPU%u/HM/PokeWaitFailed", "Profiling of poke wait when RTMpPokeCpu fails.");
741 HM_REG_PROFILE(&pHmCpu->StatEntry, "/PROF/CPU%u/HM/Entry", "Profiling of entry until entering GC.");
742 HM_REG_PROFILE(&pHmCpu->StatPreExit, "/PROF/CPU%u/HM/SwitchFromGC_1", "Profiling of pre-exit processing after returning from GC.");
743 HM_REG_PROFILE(&pHmCpu->StatExitHandling, "/PROF/CPU%u/HM/SwitchFromGC_2", "Profiling of exit handling (longjmps not included!)");
744 HM_REG_PROFILE(&pHmCpu->StatExitIO, "/PROF/CPU%u/HM/SwitchFromGC_2/IO", "I/O.");
745 HM_REG_PROFILE(&pHmCpu->StatExitMovCRx, "/PROF/CPU%u/HM/SwitchFromGC_2/MovCRx", "MOV CRx.");
746 HM_REG_PROFILE(&pHmCpu->StatExitXcptNmi, "/PROF/CPU%u/HM/SwitchFromGC_2/XcptNmi", "Exceptions, NMIs.");
747 HM_REG_PROFILE(&pHmCpu->StatExitVmentry, "/PROF/CPU%u/HM/SwitchFromGC_2/Vmentry", "VMLAUNCH/VMRESUME on Intel or VMRUN on AMD.");
748 HM_REG_PROFILE(&pHmCpu->StatImportGuestState, "/PROF/CPU%u/HM/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
749 HM_REG_PROFILE(&pHmCpu->StatExportGuestState, "/PROF/CPU%u/HM/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
750 HM_REG_PROFILE(&pHmCpu->StatLoadGuestFpuState, "/PROF/CPU%u/HM/LoadGuestFpuState", "Profiling of CPUMR0LoadGuestFPU.");
751 HM_REG_PROFILE(&pHmCpu->StatInGC, "/PROF/CPU%u/HM/InGC", "Profiling of execution of guest-code in hardware.");
752# ifdef HM_PROFILE_EXIT_DISPATCH
753 HM_REG_STAT(&pHmCpu->StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
754 "/PROF/CPU%u/HM/ExitDispatch", "Profiling the dispatching of exit handlers.");
755# endif
756#endif
757# define HM_REG_COUNTER(a, b, desc) HM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
758
759#ifdef VBOX_WITH_STATISTICS
760 HM_REG_COUNTER(&pHmCpu->StatExitAll, "/HM/CPU%u/Exit/All", "Total exits (including nested-guest exits).");
761 HM_REG_COUNTER(&pHmCpu->StatNestedExitAll, "/HM/CPU%u/Exit/NestedGuest/All", "Total nested-guest exits.");
762 HM_REG_COUNTER(&pHmCpu->StatExitShadowNM, "/HM/CPU%u/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
763 HM_REG_COUNTER(&pHmCpu->StatExitGuestNM, "/HM/CPU%u/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
764 HM_REG_COUNTER(&pHmCpu->StatExitShadowPF, "/HM/CPU%u/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
765 HM_REG_COUNTER(&pHmCpu->StatExitShadowPFEM, "/HM/CPU%u/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
766 HM_REG_COUNTER(&pHmCpu->StatExitGuestPF, "/HM/CPU%u/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
767 HM_REG_COUNTER(&pHmCpu->StatExitGuestUD, "/HM/CPU%u/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
768 HM_REG_COUNTER(&pHmCpu->StatExitGuestSS, "/HM/CPU%u/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
769 HM_REG_COUNTER(&pHmCpu->StatExitGuestNP, "/HM/CPU%u/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
770 HM_REG_COUNTER(&pHmCpu->StatExitGuestTS, "/HM/CPU%u/Exit/Trap/Gst/#TS", "Guest #TS (task switch) exception.");
771 HM_REG_COUNTER(&pHmCpu->StatExitGuestOF, "/HM/CPU%u/Exit/Trap/Gst/#OF", "Guest #OF (overflow) exception.");
772 HM_REG_COUNTER(&pHmCpu->StatExitGuestGP, "/HM/CPU%u/Exit/Trap/Gst/#GP", "Guest #GP (general protection) exception.");
773 HM_REG_COUNTER(&pHmCpu->StatExitGuestDE, "/HM/CPU%u/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
774 HM_REG_COUNTER(&pHmCpu->StatExitGuestDF, "/HM/CPU%u/Exit/Trap/Gst/#DF", "Guest #DF (double fault) exception.");
775 HM_REG_COUNTER(&pHmCpu->StatExitGuestBR, "/HM/CPU%u/Exit/Trap/Gst/#BR", "Guest #BR (boundary range exceeded) exception.");
776#endif
777 HM_REG_COUNTER(&pHmCpu->StatExitGuestAC, "/HM/CPU%u/Exit/Trap/Gst/#AC", "Guest #AC (alignment check) exception.");
778 if (fCpuSupportsVmx)
779 HM_REG_COUNTER(&pHmCpu->StatExitGuestACSplitLock, "/HM/CPU%u/Exit/Trap/Gst/#AC-split-lock", "Guest triggered #AC due to split-lock being enabled on the host (interpreted).");
780#ifdef VBOX_WITH_STATISTICS
781 HM_REG_COUNTER(&pHmCpu->StatExitGuestDB, "/HM/CPU%u/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
782 HM_REG_COUNTER(&pHmCpu->StatExitGuestMF, "/HM/CPU%u/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
783 HM_REG_COUNTER(&pHmCpu->StatExitGuestBP, "/HM/CPU%u/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
784 HM_REG_COUNTER(&pHmCpu->StatExitGuestXF, "/HM/CPU%u/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
785 HM_REG_COUNTER(&pHmCpu->StatExitGuestXcpUnk, "/HM/CPU%u/Exit/Trap/Gst/Other", "Other guest exceptions.");
786 HM_REG_COUNTER(&pHmCpu->StatExitRdmsr, "/HM/CPU%u/Exit/Instr/Rdmsr", "MSR read.");
787 HM_REG_COUNTER(&pHmCpu->StatExitWrmsr, "/HM/CPU%u/Exit/Instr/Wrmsr", "MSR write.");
788 HM_REG_COUNTER(&pHmCpu->StatExitDRxWrite, "/HM/CPU%u/Exit/Instr/DR-Write", "Debug register write.");
789 HM_REG_COUNTER(&pHmCpu->StatExitDRxRead, "/HM/CPU%u/Exit/Instr/DR-Read", "Debug register read.");
790 HM_REG_COUNTER(&pHmCpu->StatExitCR0Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
791 HM_REG_COUNTER(&pHmCpu->StatExitCR2Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
792 HM_REG_COUNTER(&pHmCpu->StatExitCR3Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
793 HM_REG_COUNTER(&pHmCpu->StatExitCR4Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
794 HM_REG_COUNTER(&pHmCpu->StatExitCR8Read, "/HM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
795 HM_REG_COUNTER(&pHmCpu->StatExitCR0Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
796 HM_REG_COUNTER(&pHmCpu->StatExitCR2Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
797 HM_REG_COUNTER(&pHmCpu->StatExitCR3Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
798 HM_REG_COUNTER(&pHmCpu->StatExitCR4Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
799 HM_REG_COUNTER(&pHmCpu->StatExitCR8Write, "/HM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
800 HM_REG_COUNTER(&pHmCpu->StatExitClts, "/HM/CPU%u/Exit/Instr/CLTS", "CLTS instruction.");
801 HM_REG_COUNTER(&pHmCpu->StatExitLmsw, "/HM/CPU%u/Exit/Instr/LMSW", "LMSW instruction.");
802 HM_REG_COUNTER(&pHmCpu->StatExitXdtrAccess, "/HM/CPU%u/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
803 HM_REG_COUNTER(&pHmCpu->StatExitIOWrite, "/HM/CPU%u/Exit/Instr/IO/Write", "I/O write.");
804 HM_REG_COUNTER(&pHmCpu->StatExitIORead, "/HM/CPU%u/Exit/Instr/IO/Read", "I/O read.");
805 HM_REG_COUNTER(&pHmCpu->StatExitIOStringWrite, "/HM/CPU%u/Exit/Instr/IO/WriteString", "String I/O write.");
806 HM_REG_COUNTER(&pHmCpu->StatExitIOStringRead, "/HM/CPU%u/Exit/Instr/IO/ReadString", "String I/O read.");
807 HM_REG_COUNTER(&pHmCpu->StatExitIntWindow, "/HM/CPU%u/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
808 HM_REG_COUNTER(&pHmCpu->StatExitExtInt, "/HM/CPU%u/Exit/ExtInt", "Physical maskable interrupt (host).");
809#endif
810 HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC, "/HM/CPU%u/Exit/HostNmiInGC", "Host NMI received while in guest context.");
811 HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi, "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
812 HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
813#ifdef VBOX_WITH_STATISTICS
814 HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold, "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
815 HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch, "/HM/CPU%u/Exit/TaskSwitch", "Task switch caused through task gate in IDT.");
816 HM_REG_COUNTER(&pHmCpu->StatExitApicAccess, "/HM/CPU%u/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
817
818 HM_REG_COUNTER(&pHmCpu->StatSwitchTprMaskedIrq, "/HM/CPU%u/Switch/TprMaskedIrq", "PDMGetInterrupt() signals TPR masks pending Irq.");
819 HM_REG_COUNTER(&pHmCpu->StatSwitchGuestIrq, "/HM/CPU%u/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
820 HM_REG_COUNTER(&pHmCpu->StatSwitchPendingHostIrq, "/HM/CPU%u/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
821 HM_REG_COUNTER(&pHmCpu->StatSwitchHmToR3FF, "/HM/CPU%u/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
822 HM_REG_COUNTER(&pHmCpu->StatSwitchVmReq, "/HM/CPU%u/Switch/VmReq", "Exit to ring-3 due to pending VM requests.");
823 HM_REG_COUNTER(&pHmCpu->StatSwitchPgmPoolFlush, "/HM/CPU%u/Switch/PgmPoolFlush", "Exit to ring-3 due to pending PGM pool flush.");
824 HM_REG_COUNTER(&pHmCpu->StatSwitchDma, "/HM/CPU%u/Switch/PendingDma", "Exit to ring-3 due to pending DMA requests.");
825 HM_REG_COUNTER(&pHmCpu->StatSwitchExitToR3, "/HM/CPU%u/Switch/ExitToR3", "Exit to ring-3 (total).");
826 HM_REG_COUNTER(&pHmCpu->StatSwitchLongJmpToR3, "/HM/CPU%u/Switch/LongJmpToR3", "Longjump to ring-3.");
827 HM_REG_COUNTER(&pHmCpu->StatSwitchMaxResumeLoops, "/HM/CPU%u/Switch/MaxResumeLoops", "Maximum VMRESUME inner-loop counter reached.");
828 HM_REG_COUNTER(&pHmCpu->StatSwitchHltToR3, "/HM/CPU%u/Switch/HltToR3", "HLT causing us to go to ring-3.");
829 HM_REG_COUNTER(&pHmCpu->StatSwitchApicAccessToR3, "/HM/CPU%u/Switch/ApicAccessToR3", "APIC access causing us to go to ring-3.");
830#endif
831 HM_REG_COUNTER(&pHmCpu->StatSwitchPreempt, "/HM/CPU%u/Switch/Preempting", "EMT has been preempted while in HM context.");
832#ifdef VBOX_WITH_STATISTICS
833 HM_REG_COUNTER(&pHmCpu->StatSwitchNstGstVmexit, "/HM/CPU%u/Switch/NstGstVmexit", "Nested-guest VM-exit occurred.");
834
835 HM_REG_COUNTER(&pHmCpu->StatInjectInterrupt, "/HM/CPU%u/EventInject/Interrupt", "Injected an external interrupt into the guest.");
836 HM_REG_COUNTER(&pHmCpu->StatInjectXcpt, "/HM/CPU%u/EventInject/Trap", "Injected an exception into the guest.");
837 HM_REG_COUNTER(&pHmCpu->StatInjectReflect, "/HM/CPU%u/EventInject/Reflect", "Reflecting an exception caused due to event injection.");
838 HM_REG_COUNTER(&pHmCpu->StatInjectConvertDF, "/HM/CPU%u/EventInject/ReflectDF", "Injected a converted #DF caused due to event injection.");
839 HM_REG_COUNTER(&pHmCpu->StatInjectInterpret, "/HM/CPU%u/EventInject/Interpret", "Falling back to interpreter for handling exception caused due to event injection.");
840 HM_REG_COUNTER(&pHmCpu->StatInjectReflectNPF, "/HM/CPU%u/EventInject/ReflectNPF", "Reflecting event that caused an EPT violation / nested #PF.");
841
842 HM_REG_COUNTER(&pHmCpu->StatFlushPage, "/HM/CPU%u/Flush/Page", "Invalidating a guest page on all guest CPUs.");
843 HM_REG_COUNTER(&pHmCpu->StatFlushPageManual, "/HM/CPU%u/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
844 HM_REG_COUNTER(&pHmCpu->StatFlushPhysPageManual, "/HM/CPU%u/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
845 HM_REG_COUNTER(&pHmCpu->StatFlushTlb, "/HM/CPU%u/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
846 HM_REG_COUNTER(&pHmCpu->StatFlushTlbManual, "/HM/CPU%u/Flush/TLB/Manual", "Request a full guest-TLB flush.");
847 HM_REG_COUNTER(&pHmCpu->StatFlushTlbNstGst, "/HM/CPU%u/Flush/TLB/NestedGuest", "Request a nested-guest-TLB flush.");
848 HM_REG_COUNTER(&pHmCpu->StatFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
849 HM_REG_COUNTER(&pHmCpu->StatNoFlushTlbWorldSwitch, "/HM/CPU%u/Flush/TLB/Skipped", "No TLB flushing required.");
850 HM_REG_COUNTER(&pHmCpu->StatFlushEntire, "/HM/CPU%u/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
851 HM_REG_COUNTER(&pHmCpu->StatFlushAsid, "/HM/CPU%u/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
852 HM_REG_COUNTER(&pHmCpu->StatFlushNestedPaging, "/HM/CPU%u/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
853 HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgVirt, "/HM/CPU%u/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
854 HM_REG_COUNTER(&pHmCpu->StatFlushTlbInvlpgPhys, "/HM/CPU%u/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
855 HM_REG_COUNTER(&pHmCpu->StatTlbShootdown, "/HM/CPU%u/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
856 HM_REG_COUNTER(&pHmCpu->StatTlbShootdownFlush, "/HM/CPU%u/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
857
858 HM_REG_COUNTER(&pHmCpu->StatTscParavirt, "/HM/CPU%u/TSC/Paravirt", "Paravirtualized TSC in effect.");
859 HM_REG_COUNTER(&pHmCpu->StatTscOffset, "/HM/CPU%u/TSC/Offset", "TSC offsetting is in effect.");
860 HM_REG_COUNTER(&pHmCpu->StatTscIntercept, "/HM/CPU%u/TSC/Intercept", "Intercept TSC accesses.");
861
862 HM_REG_COUNTER(&pHmCpu->StatDRxArmed, "/HM/CPU%u/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
863 HM_REG_COUNTER(&pHmCpu->StatDRxContextSwitch, "/HM/CPU%u/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
864 HM_REG_COUNTER(&pHmCpu->StatDRxIoCheck, "/HM/CPU%u/Debug/IOCheck", "Checking for I/O breakpoint.");
865
866 HM_REG_COUNTER(&pHmCpu->StatExportMinimal, "/HM/CPU%u/Export/Minimal", "VM-entry exporting minimal guest-state.");
867 HM_REG_COUNTER(&pHmCpu->StatExportFull, "/HM/CPU%u/Export/Full", "VM-entry exporting the full guest-state.");
868 HM_REG_COUNTER(&pHmCpu->StatLoadGuestFpu, "/HM/CPU%u/Export/GuestFpu", "VM-entry loading the guest-FPU state.");
869 HM_REG_COUNTER(&pHmCpu->StatExportHostState, "/HM/CPU%u/Export/HostState", "VM-entry exporting host-state.");
870
871 if (fCpuSupportsVmx)
872 {
873 HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRip, "/HM/CPU%u/WriteHostRIP", "Number of VMX_VMCS_HOST_RIP instructions.");
874 HM_REG_COUNTER(&pHmCpu->StatVmxWriteHostRsp, "/HM/CPU%u/WriteHostRSP", "Number of VMX_VMCS_HOST_RSP instructions.");
875 HM_REG_COUNTER(&pHmCpu->StatVmxVmLaunch, "/HM/CPU%u/VMLaunch", "Number of VM-entries using VMLAUNCH.");
876 HM_REG_COUNTER(&pHmCpu->StatVmxVmResume, "/HM/CPU%u/VMResume", "Number of VM-entries using VMRESUME.");
877 }
878
879 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelBase, "/HM/CPU%u/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
880 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelLimit, "/HM/CPU%u/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
881 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRmSelAttr, "/HM/CPU%u/VMXCheck/RMSelAttrs", "Could not use VMX due to unsuitable real-mode selector attributes.");
882
883 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelBase, "/HM/CPU%u/VMXCheck/V86SelBase", "Could not use VMX due to unsuitable v8086-mode selector base.");
884 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelLimit, "/HM/CPU%u/VMXCheck/V86SelLimit", "Could not use VMX due to unsuitable v8086-mode selector limit.");
885 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadV86SelAttr, "/HM/CPU%u/VMXCheck/V86SelAttrs", "Could not use VMX due to unsuitable v8086-mode selector attributes.");
886
887 HM_REG_COUNTER(&pHmCpu->StatVmxCheckRmOk, "/HM/CPU%u/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
888 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadSel, "/HM/CPU%u/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
889 HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl, "/HM/CPU%u/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
890 HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk, "/HM/CPU%u/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
891#endif
892 if (fCpuSupportsVmx)
893 {
894 HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer, "/HM/CPU%u/PreemptTimer", "VMX-preemption timer fired.");
895 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadline, "/HM/CPU%u/PreemptTimer/ReusingDeadline", "VMX-preemption timer arming logic using previously calculated deadline");
896 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadlineExpired, "/HM/CPU%u/PreemptTimer/ReusingDeadlineExpired", "VMX-preemption timer arming logic found previous deadline already expired (ignored)");
897 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadline, "/HM/CPU%u/PreemptTimer/RecalcingDeadline", "VMX-preemption timer arming logic recalculating the deadline (slightly expensive)");
898 HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadlineExpired, "/HM/CPU%u/PreemptTimer/RecalcingDeadlineExpired", "VMX-preemption timer arming logic found recalculated deadline expired (ignored)");
899 }
900#ifdef VBOX_WITH_STATISTICS
901 /*
902 * Guest Exit reason stats.
903 */
904 if (fCpuSupportsVmx)
905 {
906 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
907 {
908 const char *pszExitName = HMGetVmxExitName(j);
909 if (pszExitName)
910 {
911 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
912 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
913 AssertRCReturn(rc, rc);
914 }
915 }
916 }
917 else
918 {
919 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
920 {
921 const char *pszExitName = HMGetSvmExitName(j);
922 if (pszExitName)
923 {
924 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
925 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/Reason/%02x", idCpu, j);
926 AssertRC(rc);
927 }
928 }
929 }
930 HM_REG_COUNTER(&pHmCpu->StatExitReasonNpf, "/HM/CPU%u/Exit/Reason/#NPF", "Nested page faults");
931
932#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
933 /*
934 * Nested-guest VM-exit reason stats.
935 */
936 if (fCpuSupportsVmx)
937 {
938 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
939 {
940 const char *pszExitName = HMGetVmxExitName(j);
941 if (pszExitName)
942 {
943 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
944 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
945 AssertRC(rc);
946 }
947 }
948 }
949 else
950 {
951 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
952 {
953 const char *pszExitName = HMGetSvmExitName(j);
954 if (pszExitName)
955 {
956 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
957 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%u/Exit/NestedGuest/Reason/%02x", idCpu, j);
958 AssertRC(rc);
959 }
960 }
961 }
962 HM_REG_COUNTER(&pHmCpu->StatNestedExitReasonNpf, "/HM/CPU%u/Exit/NestedGuest/Reason/#NPF", "Nested page faults");
963#endif
964
965 /*
966 * Injected interrupts stats.
967 */
968 char szDesc[64];
969 for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedIrqs); j++)
970 {
971 RTStrPrintf(&szDesc[0], sizeof(szDesc), "Interrupt %u", j);
972 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
973 STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectIntr/%02X", idCpu, j);
974 AssertRC(rc);
975 }
976
977 /*
978 * Injected exception stats.
979 */
980 for (unsigned j = 0; j < RT_ELEMENTS(pHmCpu->aStatInjectedXcpts); j++)
981 {
982 RTStrPrintf(&szDesc[0], sizeof(szDesc), "%s exception", hmR3GetXcptName(j));
983 rc = STAMR3RegisterF(pVM, &pHmCpu->aStatInjectedXcpts[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
984 STAMUNIT_OCCURENCES, szDesc, "/HM/CPU%u/EventInject/InjectXcpt/%02X", idCpu, j);
985 AssertRC(rc);
986 }
987
988#endif /* VBOX_WITH_STATISTICS */
989#undef HM_REG_COUNTER
990#undef HM_REG_PROFILE
991#undef HM_REG_STAT
992 }
993
994 return VINF_SUCCESS;
995}
996
997
998/**
999 * Called when an init phase has completed.
1000 *
1001 * @returns VBox status code.
1002 * @param pVM The cross context VM structure.
1003 * @param enmWhat The phase that completed.
1004 */
1005VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1006{
1007 switch (enmWhat)
1008 {
1009 case VMINITCOMPLETED_RING3:
1010 return hmR3InitFinalizeR3(pVM);
1011 case VMINITCOMPLETED_RING0:
1012 return hmR3InitFinalizeR0(pVM);
1013 default:
1014 return VINF_SUCCESS;
1015 }
1016}
1017
1018
1019/**
1020 * Turns off normal raw mode features.
1021 *
1022 * @param pVM The cross context VM structure.
1023 */
1024static void hmR3DisableRawMode(PVM pVM)
1025{
1026/** @todo r=bird: HM shouldn't be doing this crap. */
1027 /* Reinit the paging mode to force the new shadow mode. */
1028 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1029 {
1030 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1031 PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
1032 }
1033}
1034
1035
1036/**
1037 * Initialize VT-x or AMD-V.
1038 *
1039 * @returns VBox status code.
1040 * @param pVM The cross context VM structure.
1041 */
1042static int hmR3InitFinalizeR0(PVM pVM)
1043{
1044 int rc;
1045
1046 if (!HMIsEnabled(pVM))
1047 return VINF_SUCCESS;
1048
1049 /*
1050 * Hack to allow users to work around broken BIOSes that incorrectly set
1051 * EFER.SVME, which makes us believe somebody else is already using AMD-V.
1052 */
1053 if ( !pVM->hm.s.vmx.fSupported
1054 && !pVM->hm.s.svm.fSupported
1055 && pVM->hm.s.ForR3.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
1056 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
1057 {
1058 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
1059 pVM->hm.s.svm.fSupported = true;
1060 pVM->hm.s.svm.fIgnoreInUseError = true;
1061 pVM->hm.s.ForR3.rcInit = VINF_SUCCESS;
1062 }
1063
1064 /*
1065 * Report ring-0 init errors.
1066 */
1067 if ( !pVM->hm.s.vmx.fSupported
1068 && !pVM->hm.s.svm.fSupported)
1069 {
1070 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.ForR3.rcInit));
1071 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.ForR3.vmx.u64HostFeatCtrl));
1072 switch (pVM->hm.s.ForR3.rcInit)
1073 {
1074 case VERR_VMX_IN_VMX_ROOT_MODE:
1075 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor");
1076 case VERR_VMX_NO_VMX:
1077 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available");
1078 case VERR_VMX_MSR_VMX_DISABLED:
1079 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_DISABLED, "VT-x is disabled in the BIOS");
1080 case VERR_VMX_MSR_ALL_VMX_DISABLED:
1081 return VM_SET_ERROR(pVM, VERR_VMX_MSR_ALL_VMX_DISABLED, "VT-x is disabled in the BIOS for all CPU modes");
1082 case VERR_VMX_MSR_LOCKING_FAILED:
1083 return VM_SET_ERROR(pVM, VERR_VMX_MSR_LOCKING_FAILED, "Failed to lock VT-x features while trying to enable VT-x");
1084 case VERR_VMX_MSR_VMX_ENABLE_FAILED:
1085 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_ENABLE_FAILED, "Failed to enable VT-x features");
1086 case VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED:
1087 return VM_SET_ERROR(pVM, VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED, "Failed to enable VT-x features in SMX mode");
1088
1089 case VERR_SVM_IN_USE:
1090 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor");
1091 case VERR_SVM_NO_SVM:
1092 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available");
1093 case VERR_SVM_DISABLED:
1094 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
1095 }
1096 return VMSetError(pVM, pVM->hm.s.ForR3.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.ForR3.rcInit);
1097 }
1098
1099 /*
1100 * Enable VT-x or AMD-V on all host CPUs.
1101 */
1102 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
1103 if (RT_FAILURE(rc))
1104 {
1105 LogRel(("HM: Failed to enable, error %Rrc\n", rc));
1106 HMR3CheckError(pVM, rc);
1107 return rc;
1108 }
1109
1110 /*
1111 * No TPR patching is required when the IO-APIC is not enabled for this VM.
1112 * (Main should have taken care of this already)
1113 */
1114 if (!PDMHasIoApic(pVM))
1115 {
1116 Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
1117 pVM->hm.s.fTprPatchingAllowed = false;
1118 }
1119
1120 LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
1121 pVM->hm.s.ForR3.fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
1122 pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
1123
1124 /*
1125 * Do the vendor specific initialization
1126 *
1127 * Note! We enable release log buffering here since we're doing a relatively
1128 * large amount of logging and don't want to hit the disk with each LogRel
1129 * statement.
1130 */
1131 AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
1132 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
1133 if (pVM->hm.s.vmx.fSupported)
1134 rc = hmR3InitFinalizeR0Intel(pVM);
1135 else
1136 rc = hmR3InitFinalizeR0Amd(pVM);
1137 LogRel((pVM->hm.s.fGlobalInit ? "HM: VT-x/AMD-V init method: Global\n"
1138 : "HM: VT-x/AMD-V init method: Local\n"));
1139 RTLogRelSetBuffering(fOldBuffered);
1140 pVM->hm.s.fInitialized = true;
1141
1142 return rc;
1143}
1144
1145
1146/**
1147 * @callback_method_impl{FNPDMVMMDEVHEAPNOTIFY}
1148 */
1149static DECLCALLBACK(void) hmR3VmmDevHeapNotify(PVM pVM, void *pvAllocation, RTGCPHYS GCPhysAllocation)
1150{
1151 NOREF(pVM);
1152 NOREF(pvAllocation);
1153 NOREF(GCPhysAllocation);
1154}
1155
1156
1157/**
1158 * Returns a description of the VMCS (and associated regions') memory type given the
1159 * IA32_VMX_BASIC MSR.
1160 *
1161 * @returns The descriptive memory type.
1162 * @param uMsrVmxBasic IA32_VMX_BASIC MSR value.
1163 */
1164static const char *hmR3VmxGetMemTypeDesc(uint64_t uMsrVmxBasic)
1165{
1166 uint8_t const uMemType = RT_BF_GET(uMsrVmxBasic, VMX_BF_BASIC_VMCS_MEM_TYPE);
1167 switch (uMemType)
1168 {
1169 case VMX_BASIC_MEM_TYPE_WB: return "Write Back (WB)";
1170 case VMX_BASIC_MEM_TYPE_UC: return "Uncacheable (UC)";
1171 }
1172 return "Unknown";
1173}
1174
1175
1176/**
1177 * Returns a single-line description of all the activity-states supported by the CPU
1178 * given the IA32_VMX_MISC MSR.
1179 *
1180 * @returns All supported activity states.
1181 * @param uMsrMisc IA32_VMX_MISC MSR value.
1182 */
1183static const char *hmR3VmxGetActivityStateAllDesc(uint64_t uMsrMisc)
1184{
1185 static const char * const s_apszActStates[] =
1186 {
1187 "",
1188 " ( HLT )",
1189 " ( SHUTDOWN )",
1190 " ( HLT SHUTDOWN )",
1191 " ( SIPI_WAIT )",
1192 " ( HLT SIPI_WAIT )",
1193 " ( SHUTDOWN SIPI_WAIT )",
1194 " ( HLT SHUTDOWN SIPI_WAIT )"
1195 };
1196 uint8_t const idxActStates = RT_BF_GET(uMsrMisc, VMX_BF_MISC_ACTIVITY_STATES);
1197 Assert(idxActStates < RT_ELEMENTS(s_apszActStates));
1198 return s_apszActStates[idxActStates];
1199}
1200
1201
1202/**
1203 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
1204 *
1205 * @param fFeatMsr The feature control MSR value.
1206 */
1207static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
1208{
1209 uint64_t const val = fFeatMsr;
1210 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val));
1211 HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
1212 HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
1213 HMVMX_REPORT_MSR_CAP(val, "VMXON", MSR_IA32_FEATURE_CONTROL_VMXON);
1214 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN0", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_0);
1215 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN1", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_1);
1216 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN2", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_2);
1217 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN3", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_3);
1218 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN4", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_4);
1219 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN5", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_5);
1220 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN6", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_6);
1221 HMVMX_REPORT_MSR_CAP(val, "SENTER_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SENTER_GLOBAL_EN);
1222 HMVMX_REPORT_MSR_CAP(val, "SGX_LAUNCH_EN", MSR_IA32_FEATURE_CONTROL_SGX_LAUNCH_EN);
1223 HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
1224 HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE);
1225 if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
1226 LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
1227}
1228
1229
1230/**
1231 * Reports MSR_IA32_VMX_BASIC MSR to the log.
1232 *
1233 * @param uBasicMsr The VMX basic MSR value.
1234 */
1235static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
1236{
1237 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr));
1238 LogRel(("HM: VMCS id = %#x\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_ID)));
1239 LogRel(("HM: VMCS size = %u bytes\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_SIZE)));
1240 LogRel(("HM: VMCS physical address limit = %s\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_PHYSADDR_WIDTH) ?
1241 "< 4 GB" : "None"));
1242 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr)));
1243 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_DUAL_MON)));
1244 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_INS_OUTS)));
1245 LogRel(("HM: Supports true-capability MSRs = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_TRUE_CTLS)));
1246 LogRel(("HM: VM-entry Xcpt error-code optional = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_XCPT_ERRCODE)));
1247}
1248
1249
1250/**
1251 * Reports MSR_IA32_PINBASED_CTLS to the log.
1252 *
1253 * @param pVmxMsr Pointer to the VMX MSR.
1254 */
1255static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1256{
1257 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1258 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
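    /* Informational: for these VMX control capability MSRs a feature may be enabled only if its bit is
       set in allowed1, and it is forced on if its bit is set in allowed0; HMVMX_REPORT_FEAT is handed
       both halves so it can report each feature accordingly. */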
1259 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
1260 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
1261 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
1262 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRTUAL_NMI", VMX_PIN_CTLS_VIRT_NMI);
1263 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PREEMPT_TIMER", VMX_PIN_CTLS_PREEMPT_TIMER);
1264 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "POSTED_INT", VMX_PIN_CTLS_POSTED_INT);
1265}
1266
1267
1268/**
1269 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
1270 *
1271 * @param pVmxMsr Pointer to the VMX MSR.
1272 */
1273static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1274{
1275 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1276 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1277 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
1278 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INT_WINDOW_EXIT", VMX_PROC_CTLS_INT_WINDOW_EXIT);
1279 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TSC_OFFSETTING", VMX_PROC_CTLS_USE_TSC_OFFSETTING);
1280 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HLT_EXIT", VMX_PROC_CTLS_HLT_EXIT);
1281 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVLPG_EXIT", VMX_PROC_CTLS_INVLPG_EXIT);
1282 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MWAIT_EXIT", VMX_PROC_CTLS_MWAIT_EXIT);
1283 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDPMC_EXIT", VMX_PROC_CTLS_RDPMC_EXIT);
1284 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSC_EXIT", VMX_PROC_CTLS_RDTSC_EXIT);
1285 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_LOAD_EXIT", VMX_PROC_CTLS_CR3_LOAD_EXIT);
1286 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_STORE_EXIT", VMX_PROC_CTLS_CR3_STORE_EXIT);
1287 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TERTIARY_CTLS", VMX_PROC_CTLS_USE_TERTIARY_CTLS);
1288 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_LOAD_EXIT", VMX_PROC_CTLS_CR8_LOAD_EXIT);
1289 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_STORE_EXIT", VMX_PROC_CTLS_CR8_STORE_EXIT);
1290 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TPR_SHADOW", VMX_PROC_CTLS_USE_TPR_SHADOW);
1291 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_WINDOW_EXIT", VMX_PROC_CTLS_NMI_WINDOW_EXIT);
1292 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MOV_DR_EXIT", VMX_PROC_CTLS_MOV_DR_EXIT);
1293 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNCOND_IO_EXIT", VMX_PROC_CTLS_UNCOND_IO_EXIT);
1294 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_IO_BITMAPS", VMX_PROC_CTLS_USE_IO_BITMAPS);
1295 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_TRAP_FLAG", VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
1296 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_MSR_BITMAPS", VMX_PROC_CTLS_USE_MSR_BITMAPS);
1297 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_EXIT", VMX_PROC_CTLS_MONITOR_EXIT);
1298 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_EXIT", VMX_PROC_CTLS_PAUSE_EXIT);
1299 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_SECONDARY_CTLS", VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1300}
1301
1302
1303/**
1304 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
1305 *
1306 * @param pVmxMsr Pointer to the VMX MSR.
1307 */
1308static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
1309{
1310 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1311 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1312 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
1313 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_APIC_ACCESS", VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
1314 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT", VMX_PROC_CTLS2_EPT);
1315 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DESC_TABLE_EXIT", VMX_PROC_CTLS2_DESC_TABLE_EXIT);
1316 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSCP", VMX_PROC_CTLS2_RDTSCP);
1317 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_X2APIC_MODE", VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
1318 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VPID", VMX_PROC_CTLS2_VPID);
1319 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "WBINVD_EXIT", VMX_PROC_CTLS2_WBINVD_EXIT);
1320 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNRESTRICTED_GUEST", VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1321 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "APIC_REG_VIRT", VMX_PROC_CTLS2_APIC_REG_VIRT);
1322 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_INT_DELIVERY", VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
1323 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_LOOP_EXIT", VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
1324 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDRAND_EXIT", VMX_PROC_CTLS2_RDRAND_EXIT);
1325 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVPCID", VMX_PROC_CTLS2_INVPCID);
1326 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMFUNC", VMX_PROC_CTLS2_VMFUNC);
1327 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMCS_SHADOWING", VMX_PROC_CTLS2_VMCS_SHADOWING);
1328 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLS_EXIT", VMX_PROC_CTLS2_ENCLS_EXIT);
1329 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDSEED_EXIT", VMX_PROC_CTLS2_RDSEED_EXIT);
1330 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PML", VMX_PROC_CTLS2_PML);
1331 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT_XCPT_VE", VMX_PROC_CTLS2_EPT_XCPT_VE);
1332 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
1333 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "XSAVES_XRSTORS", VMX_PROC_CTLS2_XSAVES_XRSTORS);
1334 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MODE_BASED_EPT_PERM", VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
1335 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SPP_EPT", VMX_PROC_CTLS2_SPP_EPT);
1336 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PT_EPT", VMX_PROC_CTLS2_PT_EPT);
1337 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "TSC_SCALING", VMX_PROC_CTLS2_TSC_SCALING);
1338 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USER_WAIT_PAUSE", VMX_PROC_CTLS2_USER_WAIT_PAUSE);
1339 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLV_EXIT", VMX_PROC_CTLS2_ENCLV_EXIT);
1340}
1341
1342
1343/**
1344 * Reports MSR_IA32_VMX_PROCBASED_CTLS3 MSR to the log.
1345 *
1346 * @param uProcCtls3 The tertiary processor-based VM-execution control MSR.
1347 */
1348static void hmR3VmxReportProcBasedCtls3Msr(uint64_t uProcCtls3)
1349{
1350 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS3 = %#RX64\n", uProcCtls3));
1351 LogRel(("HM: LOADIWKEY_EXIT = %RTbool\n", RT_BOOL(uProcCtls3 & VMX_PROC_CTLS3_LOADIWKEY_EXIT)));
1352}
1353
1354
1355/**
1356 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
1357 *
1358 * @param pVmxMsr Pointer to the VMX MSR.
1359 */
1360static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1361{
1362 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1363 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1364 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u));
1365 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_DEBUG", VMX_ENTRY_CTLS_LOAD_DEBUG);
1366 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "IA32E_MODE_GUEST", VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
1367 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENTRY_TO_SMM", VMX_ENTRY_CTLS_ENTRY_TO_SMM);
1368 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DEACTIVATE_DUAL_MON", VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
1369 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_ENTRY_CTLS_LOAD_PERF_MSR);
1370 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_ENTRY_CTLS_LOAD_PAT_MSR);
1371 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_ENTRY_CTLS_LOAD_EFER_MSR);
1372 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_BNDCFGS_MSR", VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR);
1373 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
1374 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_RTIT_CTL_MSR", VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR);
1375 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_ENTRY_CTLS_LOAD_CET_STATE);
1376 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_ENTRY_CTLS_LOAD_PKRS_MSR);
1377}
1378
1379
1380/**
1381 * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
1382 *
1383 * @param pVmxMsr Pointer to the VMX MSR.
1384 */
1385static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1386{
1387 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1388 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1389 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u));
1390 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_DEBUG", VMX_EXIT_CTLS_SAVE_DEBUG);
1391 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HOST_ADDR_SPACE_SIZE", VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1392 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_EXIT_CTLS_LOAD_PERF_MSR);
1393 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ACK_EXT_INT", VMX_EXIT_CTLS_ACK_EXT_INT);
1394 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PAT_MSR", VMX_EXIT_CTLS_SAVE_PAT_MSR);
1395 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_EXIT_CTLS_LOAD_PAT_MSR);
1396 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_EFER_MSR", VMX_EXIT_CTLS_SAVE_EFER_MSR);
1397 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_EXIT_CTLS_LOAD_EFER_MSR);
1398 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PREEMPT_TIMER", VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1399 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_BNDCFGS_MSR", VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR);
1400 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT);
1401 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_RTIT_CTL_MSR", VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR);
1402 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_CET_STATE", VMX_EXIT_CTLS_LOAD_CET_STATE);
1403 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PKRS_MSR", VMX_EXIT_CTLS_LOAD_PKRS_MSR);
1404}
1405
1406
1407/**
1408 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
1409 *
1410 * @param fCaps The VMX EPT/VPID capability MSR value.
1411 */
1412static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
1413{
1414 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
1415 HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
1416 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
1417 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_5", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_5);
1418 HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_UC", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_UC);
1419 HMVMX_REPORT_MSR_CAP(fCaps, "MEMTYPE_WB", MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB);
1420 HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
1421 HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
1422 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
1423 HMVMX_REPORT_MSR_CAP(fCaps, "ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY);
1424 HMVMX_REPORT_MSR_CAP(fCaps, "ADVEXITINFO_EPT_VIOLATION", MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION);
1425 HMVMX_REPORT_MSR_CAP(fCaps, "SUPER_SHW_STACK", MSR_IA32_VMX_EPT_VPID_CAP_SUPER_SHW_STACK);
1426 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
1427 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1428 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
1429 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1430 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
1431 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
1432 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
1433}
1434
1435
1436/**
1437 * Reports MSR_IA32_VMX_MISC MSR to the log.
1438 *
1439 * @param pVM Pointer to the VM.
1440 * @param fMisc The VMX misc. MSR value.
1441 */
1442static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
1443{
1444 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc));
1445 uint8_t const cPreemptTimerShift = RT_BF_GET(fMisc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
1446 if (cPreemptTimerShift == pVM->hm.s.vmx.cPreemptTimerShift)
1447 LogRel(("HM: PREEMPT_TIMER_TSC = %#x\n", cPreemptTimerShift));
1448 else
1449 {
1450 LogRel(("HM: PREEMPT_TIMER_TSC = %#x - erratum detected, using %#x instead\n", cPreemptTimerShift,
1451 pVM->hm.s.vmx.cPreemptTimerShift));
1452 }
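    /* Informational: the VMX-preemption timer counts down at the TSC rate divided by 2^PREEMPT_TIMER_TSC,
       e.g. a shift of 5 means one timer tick per 32 TSC cycles. */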
1453 LogRel(("HM: EXIT_SAVE_EFER_LMA = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_EXIT_SAVE_EFER_LMA)));
1454 LogRel(("HM: ACTIVITY_STATES = %#x%s\n", RT_BF_GET(fMisc, VMX_BF_MISC_ACTIVITY_STATES),
1455 hmR3VmxGetActivityStateAllDesc(fMisc)));
1456 LogRel(("HM: INTEL_PT = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_INTEL_PT)));
1457 LogRel(("HM: SMM_READ_SMBASE_MSR = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_SMM_READ_SMBASE_MSR)));
1458 LogRel(("HM: CR3_TARGET = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_CR3_TARGET)));
1459 LogRel(("HM: MAX_MSR = %#x ( %u )\n", RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS),
1460 VMX_MISC_MAX_MSRS(fMisc)));
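    /* Informational: per the Intel SDM the raw MAX_MSR field N encodes a recommended limit of
       512 * (N + 1) entries for the VM-entry/VM-exit MSR-load/store lists; the decoded count in
       parentheses above is presumably that expansion. */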
1461 LogRel(("HM: VMXOFF_BLOCK_SMI = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMXOFF_BLOCK_SMI)));
1462 LogRel(("HM: VMWRITE_ALL = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMWRITE_ALL)));
1463 LogRel(("HM: ENTRY_INJECT_SOFT_INT = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_ENTRY_INJECT_SOFT_INT)));
1464 LogRel(("HM: MSEG_ID = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_MSEG_ID)));
1465}
1466
1467
1468/**
1469 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
1470 *
1471 * @param uVmcsEnum The VMX VMCS enum MSR value.
1472 */
1473static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
1474{
1475 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", uVmcsEnum));
1476 LogRel(("HM: HIGHEST_IDX = %#x\n", RT_BF_GET(uVmcsEnum, VMX_BF_VMCS_ENUM_HIGHEST_IDX)));
1477}
1478
1479
1480/**
1481 * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
1482 *
1483 * @param uVmFunc The VMX VMFUNC MSR value.
1484 */
1485static void hmR3VmxReportVmFuncMsr(uint64_t uVmFunc)
1486{
1487 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc));
1488 HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", RT_BF_GET(uVmFunc, VMX_BF_VMFUNC_EPTP_SWITCHING));
1489}
1490
1491
1492/**
1493 * Reports VMX CR0, CR4 fixed MSRs.
1494 *
1495 * @param pMsrs Pointer to the VMX MSRs.
1496 */
1497static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
1498{
1499 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0));
1500 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1));
1501 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0));
1502 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1));
1503}
1504
1505
1506/**
1507 * Finish VT-x initialization (after ring-0 init).
1508 *
1509 * @returns VBox status code.
1510 * @param pVM The cross context VM structure.
1511 */
1512static int hmR3InitFinalizeR0Intel(PVM pVM)
1513{
1514 int rc;
1515
1516 LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
1517 AssertLogRelReturn(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl != 0, VERR_HM_IPE_4);
1518
1519 LogRel(("HM: Using VT-x implementation 3.0\n"));
1520 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1521 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostCr4));
1522 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostMsrEfer));
1523 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl));
1524
1525 hmR3VmxReportFeatCtlMsr(pVM->hm.s.ForR3.vmx.u64HostFeatCtrl);
1526 hmR3VmxReportBasicMsr(pVM->hm.s.ForR3.vmx.Msrs.u64Basic);
1527
1528 hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.PinCtls);
1529 hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls);
1530 if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1531 hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2);
1532 if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
1533 hmR3VmxReportProcBasedCtls3Msr(pVM->hm.s.ForR3.vmx.Msrs.u64ProcCtls3);
1534
1535 hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.EntryCtls);
1536 hmR3VmxReportExitCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ExitCtls);
1537
1538 if (RT_BF_GET(pVM->hm.s.ForR3.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1539 {
1540 /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
1541 LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TruePinCtls));
1542 LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueProcCtls));
1543 LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueEntryCtls));
1544 LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueExitCtls));
1545 }
1546
1547 hmR3VmxReportMiscMsr(pVM, pVM->hm.s.ForR3.vmx.Msrs.u64Misc);
1548 hmR3VmxReportVmcsEnumMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmcsEnum);
1549 if (pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps)
1550 hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps);
1551 if (pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc)
1552 hmR3VmxReportVmFuncMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc);
1553 hmR3VmxReportCrFixedMsrs(&pVM->hm.s.ForR3.vmx.Msrs);
1554
1555#ifdef TODO_9217_VMCSINFO
1556 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1557 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1558 {
1559 PCVMXVMCSINFOSHARED pVmcsInfo = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfo;
1560 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
1561 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfo->HCPhysVmcs));
1562 }
1563#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1564 if (pVM->cpum.ro.GuestFeatures.fVmx)
1565 {
1566 LogRel(("HM: Nested-guest:\n"));
1567 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1568 {
1569 PCVMXVMCSINFOSHARED pVmcsInfoNstGst = &pVM->apCpusR3[idCpu]->hm.s.vmx.VmcsInfoNstGst;
1570 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysMsrBitmap));
1571 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", idCpu, pVmcsInfoNstGst->HCPhysVmcs));
1572 }
1573 }
1574#endif
1575#endif /* TODO_9217_VMCSINFO */
1576
1577 /*
1578 * EPT and unrestricted guest execution are determined in HMR3Init, verify the sanity of that.
1579 */
1580 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1581 || (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
1582 VERR_HM_IPE_1);
1583 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuestCfg
1584 || ( (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
1585 && pVM->hm.s.fNestedPagingCfg),
1586 VERR_HM_IPE_1);
1587
1588 /*
1589 * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1590 * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
1591 * with Nehalem and secondary VM-execution controls should be supported on all of them, but nonetheless it's Intel...
1592 */
1593 if ( !(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1594 && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1595 {
1596 CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1597 LogRel(("HM: Disabled RDTSCP\n"));
1598 }
1599
1600 if (!pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1601 {
1602 /* Allocate the device heap block for real mode emulation: three pages for the TSS and its IO bitmap (the bitmap takes two of them), plus the identity-mapped page directory set up below. */
1603 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, hmR3VmmDevHeapNotify, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1604 if (RT_SUCCESS(rc))
1605 {
1606 /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
1607 Refer to Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
1608 esp. Figure 20-5.*/
1609 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1610 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1611
1612 /* Bit set to 0 means software interrupts are redirected to the
1613 8086 program interrupt handler rather than switching to
1614 protected-mode handler. */
1615 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1616
1617 /* Allow all port IO, so that port IO instructions do not cause
1618 exceptions and would instead cause a VM-exit (based on VT-x's
1619 IO bitmap which we currently configure to always cause an exit). */
1620 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, X86_PAGE_SIZE * 2);
1621 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
1622
1623 /*
1624 * Construct a 1024 element page directory with 4 MB pages for the identity mapped
1625 * page table used in real and protected mode without paging with EPT.
1626 */
1627 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + X86_PAGE_SIZE * 3);
1628 for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
1629 {
1630 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1631 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1632 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1633 | X86_PDE4M_G;
1634 }
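                /* For illustration: entry 1 then maps guest-physical 0x00400000..0x007FFFFF and, with the
                   flags above, reads 0x004001E7 (P, RW, US, A, D, PS and G set). */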
1635
1636 /* We convert it here every time as PCI regions could be reconfigured. */
1637 if (PDMVmmDevHeapIsEnabled(pVM))
1638 {
1639 RTGCPHYS GCPhys;
1640 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1641 AssertRCReturn(rc, rc);
1642 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
1643
1644 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1645 AssertRCReturn(rc, rc);
1646 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
1647 }
1648 }
1649 else
1650 {
1651 LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
1652 pVM->hm.s.vmx.pRealModeTSS = NULL;
1653 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1654 return VMSetError(pVM, rc, RT_SRC_POS,
1655 "HM failure: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)", rc);
1656 }
1657 }
1658
1659 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1660 : "HM: Guest support: 32-bit only\n"));
1661
1662 /*
1663 * Call ring-0 to set up the VM.
1664 */
1665 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
1666 if (rc != VINF_SUCCESS)
1667 {
1668 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1669 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1670 {
1671 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1672 LogRel(("HM: CPU[%u] Last instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
1673 LogRel(("HM: CPU[%u] HM error %#x (%u)\n", idCpu, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
1674 }
1675 HMR3CheckError(pVM, rc);
1676 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1677 }
1678
1679 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer));
1680 LogRel(("HM: Enabled VMX\n"));
1681 pVM->hm.s.vmx.fEnabled = true;
1682
1683 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1684
1685 /*
1686 * Change the CPU features.
1687 */
1688 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1689 if (pVM->hm.s.fAllow64BitGuestsCfg)
1690 {
1691 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1692 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1693 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* (Long mode only on Intel CPUs.) */
1694 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1695 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1696 }
1697 /* Given that we're on a long mode host, we can simply enable NX for PAE capable guests. */
1698 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1699 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1700
1701 /*
1702 * Log configuration details.
1703 */
1704 if (pVM->hm.s.fNestedPagingCfg)
1705 {
1706 LogRel(("HM: Enabled nested paging\n"));
1707 if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
1708 LogRel(("HM: EPT flush type = Single context\n"));
1709 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1710 LogRel(("HM: EPT flush type = All contexts\n"));
1711 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
1712 LogRel(("HM: EPT flush type = Not supported\n"));
1713 else
1714 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushEpt));
1715
1716 if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
1717 LogRel(("HM: Enabled unrestricted guest execution\n"));
1718
1719 if (pVM->hm.s.fLargePages)
1720 {
1721 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1722 PGMSetLargePageUsage(pVM, true);
1723 LogRel(("HM: Enabled large page support\n"));
1724 }
1725 }
1726 else
1727 Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
1728
1729 if (pVM->hm.s.ForR3.vmx.fVpid)
1730 {
1731 LogRel(("HM: Enabled VPID\n"));
1732 if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
1733 LogRel(("HM: VPID flush type = Individual addresses\n"));
1734 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
1735 LogRel(("HM: VPID flush type = Single context\n"));
1736 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1737 LogRel(("HM: VPID flush type = All contexts\n"));
1738 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1739 LogRel(("HM: VPID flush type = Single context retain globals\n"));
1740 else
1741 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushVpid));
1742 }
1743 else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
1744 LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
1745
1746 if (pVM->hm.s.vmx.fUsePreemptTimerCfg)
1747 LogRel(("HM: Enabled VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
1748 else
1749 LogRel(("HM: Disabled VMX-preemption timer\n"));
1750
1751 if (pVM->hm.s.fVirtApicRegs)
1752 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1753
1754 if (pVM->hm.s.fPostedIntrs)
1755 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1756
1757 if (pVM->hm.s.ForR3.vmx.fUseVmcsShadowing)
1758 {
1759 bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.ForR3.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
1760 LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
1761 }
1762
1763 return VINF_SUCCESS;
1764}
1765
1766
1767/**
1768 * Finish AMD-V initialization (after ring-0 init).
1769 *
1770 * @returns VBox status code.
1771 * @param pVM The cross context VM structure.
1772 */
1773static int hmR3InitFinalizeR0Amd(PVM pVM)
1774{
1775 LogFunc(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1776
1777 LogRel(("HM: Using AMD-V implementation 2.0\n"));
1778
1779 uint32_t u32Family;
1780 uint32_t u32Model;
1781 uint32_t u32Stepping;
1782 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
1783 LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
1784 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoopsCfg));
1785 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.ForR3.svm.u64MsrHwcr));
1786 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.ForR3.svm.u32Rev));
1787 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.ForR3.uMaxAsid));
1788 LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.ForR3.svm.fFeatures));
1789
1790 /*
1791 * Enumerate AMD-V features.
1792 */
1793 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1794 {
1795#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
1796 HMSVM_REPORT_FEATURE("NESTED_PAGING", X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1797 HMSVM_REPORT_FEATURE("LBR_VIRT", X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1798 HMSVM_REPORT_FEATURE("SVM_LOCK", X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1799 HMSVM_REPORT_FEATURE("NRIP_SAVE", X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1800 HMSVM_REPORT_FEATURE("TSC_RATE_MSR", X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1801 HMSVM_REPORT_FEATURE("VMCB_CLEAN", X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1802 HMSVM_REPORT_FEATURE("FLUSH_BY_ASID", X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1803 HMSVM_REPORT_FEATURE("DECODE_ASSISTS", X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS),
1804 HMSVM_REPORT_FEATURE("PAUSE_FILTER", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1805 HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
1806 HMSVM_REPORT_FEATURE("AVIC", X86_CPUID_SVM_FEATURE_EDX_AVIC),
1807 HMSVM_REPORT_FEATURE("VIRT_VMSAVE_VMLOAD", X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD),
1808 HMSVM_REPORT_FEATURE("VGIF", X86_CPUID_SVM_FEATURE_EDX_VGIF),
1809 HMSVM_REPORT_FEATURE("GMET", X86_CPUID_SVM_FEATURE_EDX_GMET),
1810#undef HMSVM_REPORT_FEATURE
1811 };
1812
1813 uint32_t fSvmFeatures = pVM->hm.s.ForR3.svm.fFeatures;
1814 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1815 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1816 {
1817 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1818 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1819 }
1820 if (fSvmFeatures)
1821 for (unsigned iBit = 0; iBit < 32; iBit++)
1822 if (RT_BIT_32(iBit) & fSvmFeatures)
1823 LogRel(("HM: Reserved bit %u\n", iBit));
1824
1825 /*
1826 * Nested paging is determined in HMR3Init, verify the sanity of that.
1827 */
1828 AssertLogRelReturn( !pVM->hm.s.fNestedPagingCfg
1829 || (pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1830 VERR_HM_IPE_1);
1831
1832#if 0
1833 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
1834 * here. */
1835 if (RTR0IsPostIpiSupport())
1836 pVM->hm.s.fPostedIntrs = true;
1837#endif
1838
1839 /*
1840 * Determine whether we need to intercept #UD in SVM mode for emulating
1841 * Intel SYSENTER/SYSEXIT on AMD64, as these instructions result in #UD
1842 * when executed in long-mode. This is only really applicable when
1843 * non-default CPU profiles are in effect, i.e. guest vendor differs
1844 * from the host one.
1845 */
1846 if (CPUMGetGuestCpuVendor(pVM) != CPUMGetHostCpuVendor(pVM))
1847 switch (CPUMGetGuestCpuVendor(pVM))
1848 {
1849 case CPUMCPUVENDOR_INTEL:
1850 case CPUMCPUVENDOR_VIA: /*?*/
1851 case CPUMCPUVENDOR_SHANGHAI: /*?*/
1852 switch (CPUMGetHostCpuVendor(pVM))
1853 {
1854 case CPUMCPUVENDOR_AMD:
1855 case CPUMCPUVENDOR_HYGON:
1856 if (pVM->hm.s.fAllow64BitGuestsCfg)
1857 {
1858 LogRel(("HM: Intercepting #UD for emulating SYSENTER/SYSEXIT in long mode.\n"));
1859 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1860 pVM->apCpusR3[idCpu]->hm.s.svm.fEmulateLongModeSysEnterExit = true;
1861 }
1862 break;
1863 default: break;
1864 }
1865 default: break;
1866 }
1867
1868 /*
1869 * Call ring-0 to set up the VM.
1870 */
1871 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1872 if (rc != VINF_SUCCESS)
1873 {
1874 AssertMsgFailed(("%Rrc\n", rc));
1875 LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
1876 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1877 }
1878
1879 LogRel(("HM: Enabled SVM\n"));
1880 pVM->hm.s.svm.fEnabled = true;
1881
1882 if (pVM->hm.s.fNestedPagingCfg)
1883 {
1884 LogRel(("HM: Enabled nested paging\n"));
1885
1886 /*
1887 * Enable large pages (2 MB) if applicable.
1888 */
1889 if (pVM->hm.s.fLargePages)
1890 {
1891 PGMSetLargePageUsage(pVM, true);
1892 LogRel(("HM: Enabled large page support\n"));
1893 }
1894 }
1895
1896 if (pVM->hm.s.fVirtApicRegs)
1897 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1898
1899 if (pVM->hm.s.fPostedIntrs)
1900 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1901
1902 hmR3DisableRawMode(pVM);
1903
1904 /*
1905 * Change the CPU features.
1906 */
1907 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1908 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1909 if (pVM->hm.s.fAllow64BitGuestsCfg)
1910 {
1911 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1912 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1913 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1914 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1915 }
1916 /* Turn on NXE if PAE has been enabled. */
1917 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1918 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1919
1920 LogRel((pVM->hm.s.fTprPatchingAllowed ? "HM: Enabled TPR patching\n"
1921 : "HM: Disabled TPR patching\n"));
1922
1923 LogRel((pVM->hm.s.fAllow64BitGuestsCfg ? "HM: Guest support: 32-bit and 64-bit\n"
1924 : "HM: Guest support: 32-bit only\n"));
1925 return VINF_SUCCESS;
1926}
1927
1928
1929/**
1930 * Applies relocations to data and code managed by this
1931 * component. This function will be called at init and
1932 * whenever the VMM needs to relocate itself inside the GC.
1933 *
1934 * @param pVM The cross context VM structure.
1935 */
1936VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1937{
1938 /* Fetch the current paging mode during the relocate callback during state loading. */
1939 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1940 {
1941 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1942 {
1943 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1944 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1945 }
1946 }
1947}
1948
1949
1950/**
1951 * Terminates the HM.
1952 *
1953 * Termination means cleaning up and freeing all resources;
1954 * the VM itself is, at this point, powered off or suspended.
1955 *
1956 * @returns VBox status code.
1957 * @param pVM The cross context VM structure.
1958 */
1959VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
1960{
1961 if (pVM->hm.s.vmx.pRealModeTSS)
1962 {
1963 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1964 pVM->hm.s.vmx.pRealModeTSS = 0;
1965 }
1966 hmR3TermCPU(pVM);
1967 return 0;
1968}
1969
1970
1971/**
1972 * Terminates the per-VCPU HM.
1973 *
1974 * @returns VBox status code.
1975 * @param pVM The cross context VM structure.
1976 */
1977static int hmR3TermCPU(PVM pVM)
1978{
1979 RT_NOREF(pVM);
1980 return VINF_SUCCESS;
1981}
1982
1983
1984/**
1985 * Resets a virtual CPU.
1986 *
1987 * Used by HMR3Reset and CPU hot plugging.
1988 *
1989 * @param pVCpu The cross context virtual CPU structure to reset.
1990 */
1991VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
1992{
1993 /* Sync. entire state on VM reset ring-0 re-entry. It's safe to reset
1994 the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */
1995 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
1996
1997 pVCpu->hm.s.fActive = false;
1998 pVCpu->hm.s.Event.fPending = false;
1999 pVCpu->hm.s.vmx.u64GstMsrApicBase = 0;
2000 pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true;
2001#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2002 if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
2003 pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true;
2004#endif
2005}
2006
2007
2008/**
2009 * The VM is being reset.
2010 *
2011 * For the HM component this means that any GDT/LDT/TSS monitors
2012 * need to be removed.
2013 *
2014 * @param pVM The cross context VM structure.
2015 */
2016VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
2017{
2018 LogFlow(("HMR3Reset:\n"));
2019
2020 if (HMIsEnabled(pVM))
2021 hmR3DisableRawMode(pVM);
2022
2023 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2024 HMR3ResetCpu(pVM->apCpusR3[idCpu]);
2025
2026 /* Clear all patch information. */
2027 pVM->hm.s.pGuestPatchMem = 0;
2028 pVM->hm.s.pFreeGuestPatchMem = 0;
2029 pVM->hm.s.cbGuestPatchMem = 0;
2030 pVM->hm.s.cPatches = 0;
2031 pVM->hm.s.PatchTree = 0;
2032 pVM->hm.s.fTprPatchingActive = false;
2033 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
2034}
2035
2036
2037/**
2038 * Callback to remove TPR patches, restoring the original guest instructions.
2039 *
2040 * @returns VBox strict status code.
2041 * @param pVM The cross context VM structure.
2042 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2043 * @param pvUser The VCPU id (cast via uintptr_t) of the EMT that issued the original patch request.
2044 */
2045static DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
2046{
2047 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2048
2049 /* Only execute the handler on the VCPU on which the original patch request was issued. */
2050 if (pVCpu->idCpu != idCpu)
2051 return VINF_SUCCESS;
2052
2053 Log(("hmR3RemovePatches\n"));
2054 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2055 {
2056 uint8_t abInstr[15];
2057 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2058 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
2059 int rc;
2060
2061#ifdef LOG_ENABLED
2062 char szOutput[256];
2063 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2064 szOutput, sizeof(szOutput), NULL);
2065 if (RT_SUCCESS(rc))
2066 Log(("Patched instr: %s\n", szOutput));
2067#endif
2068
2069 /* Check if the instruction is still the same. */
2070 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
2071 if (rc != VINF_SUCCESS)
2072 {
2073 Log(("Patched code removed? (rc=%Rrc)\n", rc));
2074 continue; /* swapped out or otherwise removed; skip it. */
2075 }
2076
2077 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
2078 {
2079 Log(("Patched instruction was changed! (rc=%Rrc)\n", rc));
2080 continue; /* skip it. */
2081 }
2082
2083 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
2084 AssertRC(rc);
2085
2086#ifdef LOG_ENABLED
2087 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2088 szOutput, sizeof(szOutput), NULL);
2089 if (RT_SUCCESS(rc))
2090 Log(("Original instr: %s\n", szOutput));
2091#endif
2092 }
2093 pVM->hm.s.cPatches = 0;
2094 pVM->hm.s.PatchTree = 0;
2095 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
2096 pVM->hm.s.fTprPatchingActive = false;
2097 return VINF_SUCCESS;
2098}
2099
2100
2101/**
2102 * Worker for enabling patching in a VT-x/AMD-V guest.
2103 *
2104 * @returns VBox status code.
2105 * @param pVM The cross context VM structure.
2106 * @param idCpu VCPU to execute hmR3RemovePatches on.
2107 * @param pPatchMem Patch memory range.
2108 * @param cbPatchMem Size of the memory range.
2109 */
2110static DECLCALLBACK(int) hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
2111{
2112 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
2113 AssertRC(rc);
2114
2115 pVM->hm.s.pGuestPatchMem = pPatchMem;
2116 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
2117 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
2118 return VINF_SUCCESS;
2119}
2120
2121
2122/**
2123 * Enable patching in a VT-x/AMD-V guest.
2124 *
2125 * @returns VBox status code.
2126 * @param pVM The cross context VM structure.
2127 * @param pPatchMem Patch memory range.
2128 * @param cbPatchMem Size of the memory range.
2129 */
2130VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2131{
2132 VM_ASSERT_EMT(pVM);
2133 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2134 if (pVM->cCpus > 1)
2135 {
2136 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
2137 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
2138 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2139 AssertRC(rc);
2140 return rc;
2141 }
2142 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2143}
2144
2145
2146/**
2147 * Disable patching in a VT-x/AMD-V guest.
2148 *
2149 * @returns VBox status code.
2150 * @param pVM The cross context VM structure.
2151 * @param pPatchMem Patch memory range.
2152 * @param cbPatchMem Size of the memory range.
2153 */
2154VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2155{
2156 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2157 RT_NOREF2(pPatchMem, cbPatchMem);
2158
2159 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
2160 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
2161
2162 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
2163 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
2164 (void *)(uintptr_t)VMMGetCpuId(pVM));
2165 AssertRC(rc);
2166
2167 pVM->hm.s.pGuestPatchMem = 0;
2168 pVM->hm.s.pFreeGuestPatchMem = 0;
2169 pVM->hm.s.cbGuestPatchMem = 0;
2170 pVM->hm.s.fTprPatchingActive = false;
2171 return VINF_SUCCESS;
2172}
2173
2174
2175/**
2176 * Callback to patch a TPR instruction (vmmcall or mov cr8).
2177 *
2178 * @returns VBox strict status code.
2179 * @param pVM The cross context VM structure.
2180 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2181 * @param pvUser The VCPU id (cast via uintptr_t) of the EMT that issued the patch request.
2182 *
2183 */
2184static DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2185{
2186 /*
2187 * Only execute the handler on the VCPU on which the original patch request
2188 * was issued. (The other CPU(s) might not yet have switched to protected
2189 * mode, nor have the correct memory context.)
2190 */
2191 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2192 if (pVCpu->idCpu != idCpu)
2193 return VINF_SUCCESS;
2194
2195 /*
2196 * We're racing other VCPUs here, so don't try to patch the instruction twice
2197 * and make sure there is still room for our patch record.
2198 */
2199 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2200 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2201 if (pPatch)
2202 {
2203 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
2204 return VINF_SUCCESS;
2205 }
2206 uint32_t const idx = pVM->hm.s.cPatches;
2207 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2208 {
2209 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2210 return VINF_SUCCESS;
2211 }
2212 pPatch = &pVM->hm.s.aPatches[idx];
2213
2214 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2215
2216 /*
2217 * Disassemble the instruction and get cracking.
2218 */
2219 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
2220 DISCPUSTATE Dis;
2221 uint32_t cbOp;
2222 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2223 AssertRC(rc);
2224 if ( rc == VINF_SUCCESS
2225 && Dis.pCurInstr->uOpcode == OP_MOV
2226 && cbOp >= 3)
2227 {
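        /* 0F 01 D9 is the AMD-V VMMCALL encoding; the VT-x equivalent, VMCALL, is 0F 01 C1. */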
2228 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
2229
2230 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2231 AssertRC(rc);
2232
2233 pPatch->cbOp = cbOp;
2234
2235 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2236 {
2237 /* write. */
2238 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2239 {
2240 pPatch->enmType = HMTPRINSTR_WRITE_REG;
2241 pPatch->uSrcOperand = Dis.Param2.Base.idxGenReg;
2242 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", Dis.Param2.Base.idxGenReg));
2243 }
2244 else
2245 {
2246 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2247 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
2248 pPatch->uSrcOperand = Dis.Param2.uValue;
2249 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", Dis.Param2.uValue));
2250 }
2251 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2252 AssertRC(rc);
2253
2254 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2255 pPatch->cbNewOp = sizeof(s_abVMMCall);
2256 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2257 }
2258 else
2259 {
2260 /*
2261 * TPR Read.
2262 *
2263 * Found:
2264 * mov eax, dword [fffe0080] (5 bytes)
2265 * Check if next instruction is:
2266 * shr eax, 4
2267 */
2268 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2269
2270 uint8_t const idxMmioReg = Dis.Param1.Base.idxGenReg;
2271 uint8_t const cbOpMmio = cbOp;
2272 uint64_t const uSavedRip = pCtx->rip;
2273
2274 pCtx->rip += cbOp;
2275 rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2276 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
2277 pCtx->rip = uSavedRip;
2278
2279 if ( rc == VINF_SUCCESS
2280 && Dis.pCurInstr->uOpcode == OP_SHR
2281 && Dis.Param1.fUse == DISUSE_REG_GEN32
2282 && Dis.Param1.Base.idxGenReg == idxMmioReg
2283 && Dis.Param2.fUse == DISUSE_IMMEDIATE8
2284 && Dis.Param2.uValue == 4
2285 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
2286 {
2287 uint8_t abInstr[15];
2288
2289 /* Replacing the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction so as to
2290 access CR8 in 32-bit mode and not cause a #VMEXIT. */
2291 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
2292 AssertRC(rc);
2293
2294 pPatch->cbOp = cbOpMmio + cbOp;
2295
2296 /* 0xf0, 0x0f, 0x20, 0xc0 = mov eax, cr8 */
2297 abInstr[0] = 0xf0;
2298 abInstr[1] = 0x0f;
2299 abInstr[2] = 0x20;
2300 abInstr[3] = 0xc0 | Dis.Param1.Base.idxGenReg;
2301 for (unsigned i = 4; i < pPatch->cbOp; i++)
2302 abInstr[i] = 0x90; /* nop */
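            /* For illustration: if the guest read the TPR into EBX (idxGenReg=3), the replacement bytes
               are F0 0F 20 C3 (lock mov ebx, cr8) padded with NOPs up to the original length. */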
2303
2304 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
2305 AssertRC(rc);
2306
2307 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
2308 pPatch->cbNewOp = pPatch->cbOp;
2309 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessCr8);
2310
2311 Log(("Acceptable read/shr candidate!\n"));
2312 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2313 }
2314 else
2315 {
2316 pPatch->enmType = HMTPRINSTR_READ;
2317 pPatch->uDstOperand = idxMmioReg;
2318
2319 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2320 AssertRC(rc);
2321
2322 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2323 pPatch->cbNewOp = sizeof(s_abVMMCall);
2324 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2325 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2326 }
2327 }
2328
2329 pPatch->Core.Key = pCtx->eip;
2330 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2331 AssertRC(rc);
2332
2333 pVM->hm.s.cPatches++;
2334 return VINF_SUCCESS;
2335 }
2336
2337 /*
2338 * Save invalid patch, so we will not try again.
2339 */
2340 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2341 pPatch->Core.Key = pCtx->eip;
2342 pPatch->enmType = HMTPRINSTR_INVALID;
2343 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2344 AssertRC(rc);
2345 pVM->hm.s.cPatches++;
2346 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2347 return VINF_SUCCESS;
2348}
2349
2350
2351/**
2352 * Callback to patch a TPR instruction (jump to generated code).
2353 *
2354 * @returns VBox strict status code.
2355 * @param pVM The cross context VM structure.
2356 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2357 * @param pvUser The VCPU id (cast via uintptr_t) of the EMT that issued the patch request.
2358 *
2359 */
2360static DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2361{
2362 /*
2363 * Only execute the handler on the VCPU on which the original patch request
2364 * was issued. (The other CPU(s) might not yet have switched to protected
2365 * mode, nor have the correct memory context.)
2366 */
2367 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2368 if (pVCpu->idCpu != idCpu)
2369 return VINF_SUCCESS;
2370
2371 /*
2372 * We're racing other VCPUs here, so don't try to patch the instruction twice
2373 * and make sure there is still room for our patch record.
2374 */
2375 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2376 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2377 if (pPatch)
2378 {
2379 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2380 return VINF_SUCCESS;
2381 }
2382 uint32_t const idx = pVM->hm.s.cPatches;
2383 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2384 {
2385 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2386 return VINF_SUCCESS;
2387 }
2388 pPatch = &pVM->hm.s.aPatches[idx];
2389
2390 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2391 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
2392
2393 /*
2394 * Disassemble the instruction and get cracking.
2395 */
2396 DISCPUSTATE Dis;
2397 uint32_t cbOp;
2398 int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbOp);
2399 AssertRC(rc);
2400 if ( rc == VINF_SUCCESS
2401 && Dis.pCurInstr->uOpcode == OP_MOV
2402 && cbOp >= 5)
2403 {
2404 uint8_t aPatch[64];
2405 uint32_t off = 0;
2406
2407 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2408 AssertRC(rc);
2409
2410 pPatch->cbOp = cbOp;
2411 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2412
2413 if (Dis.Param1.fUse == DISUSE_DISPLACEMENT32)
2414 {
2415 /*
2416 * TPR write:
2417 *
2418 * push ECX [51]
2419 * push EDX [52]
2420 * push EAX [50]
2421 * xor EDX,EDX [31 D2]
2422 * mov EAX,EAX [89 C0]
2423 * or
2424 * mov EAX,0000000CCh [B8 CC 00 00 00]
2425 * mov ECX,0C0000082h [B9 82 00 00 C0]
2426 * wrmsr [0F 30]
2427 * pop EAX [58]
2428 * pop EDX [5A]
2429 * pop ECX [59]
2430 * jmp return_address [E9 return_address]
2431 */
2432 bool fUsesEax = (Dis.Param2.fUse == DISUSE_REG_GEN32 && Dis.Param2.Base.idxGenReg == DISGREG_EAX);
2433
2434 aPatch[off++] = 0x51; /* push ecx */
2435 aPatch[off++] = 0x52; /* push edx */
2436 if (!fUsesEax)
2437 aPatch[off++] = 0x50; /* push eax */
2438 aPatch[off++] = 0x31; /* xor edx, edx */
2439 aPatch[off++] = 0xd2;
2440 if (Dis.Param2.fUse == DISUSE_REG_GEN32)
2441 {
2442 if (!fUsesEax)
2443 {
2444 aPatch[off++] = 0x89; /* mov eax, src_reg */
2445 aPatch[off++] = MAKE_MODRM(3, Dis.Param2.Base.idxGenReg, DISGREG_EAX);
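                /* For illustration: with ECX as the source this emits 89 C8 (mov eax, ecx);
                   MAKE_MODRM(3, reg, rm) builds the register-direct ModRM byte. */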
2446 }
2447 }
2448 else
2449 {
2450 Assert(Dis.Param2.fUse == DISUSE_IMMEDIATE32);
2451 aPatch[off++] = 0xb8; /* mov eax, immediate */
2452 *(uint32_t *)&aPatch[off] = Dis.Param2.uValue;
2453 off += sizeof(uint32_t);
2454 }
2455 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2456 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2457 off += sizeof(uint32_t);
2458
2459 aPatch[off++] = 0x0f; /* wrmsr */
2460 aPatch[off++] = 0x30;
2461 if (!fUsesEax)
2462 aPatch[off++] = 0x58; /* pop eax */
2463 aPatch[off++] = 0x5a; /* pop edx */
2464 aPatch[off++] = 0x59; /* pop ecx */
2465 }
2466 else
2467 {
2468 /*
2469 * TPR read:
2470 *
2471 * push ECX [51]
2472 * push EDX [52]
2473 * push EAX [50]
2474 * mov ECX,0C0000082h [B9 82 00 00 C0]
2475 * rdmsr [0F 32]
2476 * mov EAX,EAX [89 C0]
2477 * pop EAX [58]
2478 * pop EDX [5A]
2479 * pop ECX [59]
2480 * jmp return_address [E9 return_address]
2481 */
2482 Assert(Dis.Param1.fUse == DISUSE_REG_GEN32);
2483
2484 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2485 aPatch[off++] = 0x51; /* push ecx */
2486 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX )
2487 aPatch[off++] = 0x52; /* push edx */
2488 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2489 aPatch[off++] = 0x50; /* push eax */
2490
2491 aPatch[off++] = 0x31; /* xor edx, edx */
2492 aPatch[off++] = 0xd2;
2493
2494 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2495 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2496 off += sizeof(uint32_t);
2497
2498 aPatch[off++] = 0x0f; /* rdmsr */
2499 aPatch[off++] = 0x32;
2500
2501 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2502 {
2503 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2504 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, Dis.Param1.Base.idxGenReg);
2505 }
2506
2507 if (Dis.Param1.Base.idxGenReg != DISGREG_EAX)
2508 aPatch[off++] = 0x58; /* pop eax */
2509 if (Dis.Param1.Base.idxGenReg != DISGREG_EDX )
2510 aPatch[off++] = 0x5a; /* pop edx */
2511 if (Dis.Param1.Base.idxGenReg != DISGREG_ECX)
2512 aPatch[off++] = 0x59; /* pop ecx */
2513 }
2514 aPatch[off++] = 0xe9; /* jmp return_address */
2515 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2516 off += sizeof(RTRCUINTPTR);
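        /* The rel32 displacement is measured from the end of the E9 jump in the patch buffer
           (pFreeGuestPatchMem + off + 4), so execution resumes at the guest instruction following
           the replaced TPR access (eip + cbOp). */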
2517
2518 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2519 {
2520 /* Write new code to the patch buffer. */
2521 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2522 AssertRC(rc);
2523
2524#ifdef LOG_ENABLED
2525 uint32_t cbCurInstr;
2526 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2527 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2528 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2529 {
2530 char szOutput[256];
2531 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2532 szOutput, sizeof(szOutput), &cbCurInstr);
2533 if (RT_SUCCESS(rc))
2534 Log(("Patch instr %s\n", szOutput));
2535 else
2536 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2537 }
2538#endif
2539
2540 pPatch->aNewOpcode[0] = 0xE9;
2541 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2542
2543 /* Overwrite the TPR instruction with a jump. */
2544 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2545 AssertRC(rc);
2546
2547 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
2548
2549 pVM->hm.s.pFreeGuestPatchMem += off;
2550 pPatch->cbNewOp = 5;
2551
2552 pPatch->Core.Key = pCtx->eip;
2553 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2554 AssertRC(rc);
2555
2556 pVM->hm.s.cPatches++;
2557 pVM->hm.s.fTprPatchingActive = true;
2558 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2559 return VINF_SUCCESS;
2560 }
2561
2562 Log(("Ran out of space in our patch buffer!\n"));
2563 }
2564 else
2565 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2566
2567
2568 /*
2569 * Save invalid patch, so we will not try again.
2570 */
2571 pPatch = &pVM->hm.s.aPatches[idx];
2572 pPatch->Core.Key = pCtx->eip;
2573 pPatch->enmType = HMTPRINSTR_INVALID;
2574 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2575 AssertRC(rc);
2576 pVM->hm.s.cPatches++;
2577 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2578 return VINF_SUCCESS;
2579}
2580
2581
2582/**
2583 * Attempt to patch TPR mmio instructions.
2584 *
2585 * @returns VBox status code.
2586 * @param pVM The cross context VM structure.
2587 * @param pVCpu The cross context virtual CPU structure.
2588 */
2589VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
2590{
2591 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2592 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2593 (void *)(uintptr_t)pVCpu->idCpu);
2594 AssertRC(rc);
2595 return rc;
2596}
2597
2598
2599/**
2600 * Checks if we need to reschedule due to VMM device heap changes.
2601 *
2602 * @returns true if a reschedule is required, otherwise false.
2603 * @param pVM The cross context VM structure.
2604 * @param pCtx VM execution context.
2605 */
2606VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx)
2607{
2608 /*
2609 * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
2610 * when the unrestricted guest execution feature is missing (VT-x only).
2611 */
2612 if ( pVM->hm.s.vmx.fEnabled
2613 && !pVM->hm.s.vmx.fUnrestrictedGuestCfg
2614 && CPUMIsGuestInRealModeEx(pCtx)
2615 && !PDMVmmDevHeapIsEnabled(pVM))
2616 return true;
2617
2618 return false;
2619}
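
/*
 * A minimal sketch of how an execution loop could consult this check; the
 * surrounding function and the use of VINF_EM_RESCHEDULE_REM are illustrative
 * assumptions, not the actual EM code:
 */
#if 0 /* illustrative sketch only, not built */
static int hmR3ExampleRunLoopStep(PVM pVM, PCCPUMCTX pCtx)
{
    if (HMR3IsRescheduleRequired(pVM, pCtx))
        return VINF_EM_RESCHEDULE_REM; /* Fall back to software emulation until the VMM device heap is mapped. */
    /* ...otherwise keep executing with VT-x/AMD-V... */
    return VINF_SUCCESS;
}
#endif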
2620
2621
2622/**
2623 * Notification callback from DBGF when interrupt breakpoints or generic debug
2624 * event settings change.
2625 *
2626 * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards; this
2627 * function only updates the VM globals.
2628 *
2629 * @param pVM The cross context VM structure.
2630 * @thread EMT(0)
2631 */
2632VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
2633{
2634 /* Interrupts. */
2635 bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
2636 || pVM->dbgf.ro.cHardIntBreakpoints > 0;
2637
2638 /* CPU Exceptions. */
2639 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
2640 !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
2641 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2642 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2643
2644 /* Common VM exits. */
2645 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
2646 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
2647 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2648 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2649
2650 /* Vendor specific VM exits. */
2651 if (HMR3IsVmxEnabled(pVM->pUVM))
2652 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
2653 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
2654 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2655 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2656 else
2657 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
2658 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
2659 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2660 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2661
2662 /* Done. */
2663 pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
2664}
2665
2666
2667/**
2668 * Follow up notification callback to HMR3NotifyDebugEventChanged for each CPU.
2669 *
2670 * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
2671 * per CPU settings.
2672 *
2673 * @param pVM The cross context VM structure.
2674 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2675 */
2676VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
2677{
2678 pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
2679}
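
/*
 * The intended calling order described above, shown as a minimal sketch of a
 * hypothetical DBGF-side notifier; the function name is made up and the real
 * dispatch lives in DBGF:
 */
#if 0 /* illustrative sketch only, not built */
static void dbgfR3ExampleNotifyHm(PVM pVM)
{
    /* First recompute the VM-global decision... */
    HMR3NotifyDebugEventChanged(pVM);
    /* ...then let each vCPU merge in its own single-instruction/stepping flag. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        HMR3NotifyDebugEventChangedPerCpu(pVM, pVM->apCpusR3[idCpu]);
}
#endif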
2680
2681
2682/**
2683 * Checks if we are currently using hardware acceleration.
2684 *
2685 * @returns true if hardware acceleration is being used, otherwise false.
2686 * @param pVCpu The cross context virtual CPU structure.
2687 */
2688VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu)
2689{
2690 return pVCpu->hm.s.fActive;
2691}
2692
2693
2694/**
2695 * External interface for querying whether hardware acceleration is enabled.
2696 *
2697 * @returns true if VT-x or AMD-V is being used, otherwise false.
2698 * @param pUVM The user mode VM handle.
2699 * @sa HMIsEnabled, HMIsEnabledNotMacro.
2700 */
2701VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
2702{
2703 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2704 PVM pVM = pUVM->pVM;
2705 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2706 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
2707}
2708
2709
2710/**
2711 * External interface for querying whether VT-x is being used.
2712 *
2713 * @returns true if VT-x is being used, otherwise false.
2714 * @param pUVM The user mode VM handle.
2715 * @sa HMR3IsSvmEnabled, HMIsEnabled
2716 */
2717VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
2718{
2719 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2720 PVM pVM = pUVM->pVM;
2721 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2722 return pVM->hm.s.vmx.fEnabled
2723 && pVM->hm.s.vmx.fSupported
2724 && pVM->fHMEnabled;
2725}
2726
2727
2728/**
2729 * External interface for querying whether AMD-V is being used.
2730 *
2731 * @returns true if AMD-V is being used, otherwise false.
2732 * @param pUVM The user mode VM handle.
2733 * @sa HMR3IsVmxEnabled, HMIsEnabled
2734 */
2735VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
2736{
2737 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2738 PVM pVM = pUVM->pVM;
2739 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2740 return pVM->hm.s.svm.fEnabled
2741 && pVM->hm.s.svm.fSupported
2742 && pVM->fHMEnabled;
2743}
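
/*
 * These queries take the user mode VM handle so that frontends can use them
 * very early.  A minimal sketch of a status string a frontend might build from
 * them; the helper itself is illustrative only:
 */
#if 0 /* illustrative sketch only, not built */
static const char *exampleQueryHmEngine(PUVM pUVM)
{
    if (!HMR3IsEnabled(pUVM))
        return "not using VT-x/AMD-V";
    if (HMR3IsVmxEnabled(pUVM))
        return "VT-x";
    if (HMR3IsSvmEnabled(pUVM))
        return "AMD-V";
    return "unknown";
}
#endif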
2744
2745
2746/**
2747 * Checks if we are currently using nested paging.
2748 *
2749 * @returns true if nested paging is being used, otherwise false.
2750 * @param pUVM The user mode VM handle.
2751 */
2752VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
2753{
2754 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2755 PVM pVM = pUVM->pVM;
2756 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2757 return pVM->hm.s.fNestedPagingCfg;
2758}
2759
2760
2761/**
2762 * Checks if virtualized APIC registers are enabled.
2763 *
2764 * When enabled this feature allows the hardware to access most of the
2765 * APIC registers in the virtual-APIC page without causing VM-exits. See
2766 * Intel spec. 29.1.1 "Virtualized APIC Registers".
2767 *
2768 * @returns true if virtualized APIC registers are enabled, otherwise
2769 * false.
2770 * @param pUVM The user mode VM handle.
2771 */
2772VMMR3DECL(bool) HMR3AreVirtApicRegsEnabled(PUVM pUVM)
2773{
2774 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2775 PVM pVM = pUVM->pVM;
2776 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2777 return pVM->hm.s.fVirtApicRegs;
2778}
2779
2780
2781/**
2782 * Checks if APIC posted-interrupt processing is enabled.
2783 *
2784 * This returns whether we can deliver interrupts to the guest without
2785 * leaving guest-context by updating APIC state from host-context.
2786 *
2787 * @returns true if APIC posted-interrupt processing is enabled,
2788 * otherwise false.
2789 * @param pUVM The user mode VM handle.
2790 */
2791VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM)
2792{
2793 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2794 PVM pVM = pUVM->pVM;
2795 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2796 return pVM->hm.s.fPostedIntrs;
2797}
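
/*
 * A minimal sketch combining the two APIC hardware-virtualization queries
 * above into an informational report; illustrative only:
 */
#if 0 /* illustrative sketch only, not built */
static void exampleLogApicHwVirtFeatures(PUVM pUVM)
{
    LogRel(("APIC register virtualization: %RTbool\n", HMR3AreVirtApicRegsEnabled(pUVM)));
    LogRel(("APIC posted interrupts:       %RTbool\n", HMR3IsPostedIntrsEnabled(pUVM)));
}
#endif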
2798
2799
2800/**
2801 * Checks if we are currently using VPID in VT-x mode.
2802 *
2803 * @returns true if VPID is being used, otherwise false.
2804 * @param pUVM The user mode VM handle.
2805 */
2806VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
2807{
2808 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2809 PVM pVM = pUVM->pVM;
2810 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2811 return pVM->hm.s.ForR3.vmx.fVpid;
2812}
2813
2814
2815/**
2816 * Checks if we are currently using VT-x unrestricted execution,
2817 * aka UX.
2818 *
2819 * @returns true if UX is being used, otherwise false.
2820 * @param pUVM The user mode VM handle.
2821 */
2822VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
2823{
2824 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2825 PVM pVM = pUVM->pVM;
2826 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2827 return pVM->hm.s.vmx.fUnrestrictedGuestCfg
2828 || pVM->hm.s.svm.fSupported;
2829}
2830
2831
2832/**
2833 * Checks if the VMX-preemption timer is being used.
2834 *
2835 * @returns true if the VMX-preemption timer is being used, otherwise false.
2836 * @param pVM The cross context VM structure.
2837 */
2838VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2839{
2840 return HMIsEnabled(pVM)
2841 && pVM->hm.s.vmx.fEnabled
2842 && pVM->hm.s.vmx.fUsePreemptTimerCfg;
2843}
2844
2845
2846#ifdef TODO_9217_VMCSINFO
2847/**
2848 * Helper for HMR3CheckError to log VMCS controls to the release log.
2849 *
2850 * @param idCpu The Virtual CPU ID.
2851 * @param pVmcsInfo The VMCS info. object.
2852 */
2853static void hmR3CheckErrorLogVmcsCtls(VMCPUID idCpu, PCVMXVMCSINFO pVmcsInfo)
2854{
2855 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", idCpu, pVmcsInfo->u32PinCtls));
2856 {
2857 uint32_t const u32Val = pVmcsInfo->u32PinCtls;
2858 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
2859 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
2860 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_VIRT_NMI );
2861 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_PREEMPT_TIMER);
2862 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
2863 }
2864 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls));
2865 {
2866 uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
2867 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
2868 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
2869 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_HLT_EXIT );
2870 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INVLPG_EXIT );
2871 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MWAIT_EXIT );
2872 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDPMC_EXIT );
2873 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDTSC_EXIT );
2874 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_LOAD_EXIT );
2875 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_STORE_EXIT );
2876 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TERTIARY_CTLS );
2877 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_LOAD_EXIT );
2878 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_STORE_EXIT );
2879 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TPR_SHADOW );
2880 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_NMI_WINDOW_EXIT );
2881 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MOV_DR_EXIT );
2882 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_UNCOND_IO_EXIT );
2883 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_IO_BITMAPS );
2884 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_TRAP_FLAG );
2885 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_MSR_BITMAPS );
2886 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_EXIT );
2887 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_PAUSE_EXIT );
2888 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2889 }
2890 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls2));
2891 {
2892 uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
2893 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
2894 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
2895 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_DESC_TABLE_EXIT );
2896 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDTSCP );
2897 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_X2APIC_MODE );
2898 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VPID );
2899 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_WBINVD_EXIT );
2900 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_UNRESTRICTED_GUEST );
2901 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_APIC_REG_VIRT );
2902 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_INT_DELIVERY );
2903 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT );
2904 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDRAND_EXIT );
2905 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_INVPCID );
2906 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMFUNC );
2907 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMCS_SHADOWING );
2908 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLS_EXIT );
2909 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDSEED_EXIT );
2910 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PML );
2911 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT_XCPT_VE );
2912 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
2913 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_XSAVES_XRSTORS );
2914 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
2915 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_SPP_EPT );
2916 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PT_EPT );
2917 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
2918 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_USER_WAIT_PAUSE );
2919 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLV_EXIT );
2920 }
2921 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", idCpu, pVmcsInfo->u32EntryCtls));
2922 {
2923 uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
2924 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
2925 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
2926 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_ENTRY_TO_SMM );
2927 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
2928 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PERF_MSR );
2929 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PAT_MSR );
2930 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
2931 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR );
2932 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
2933 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR );
2934 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_CET_STATE );
2935 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PKRS_MSR );
2936 }
2937 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", idCpu, pVmcsInfo->u32ExitCtls));
2938 {
2939 uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
2940 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
2941 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
2942 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PERF_MSR );
2943 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_ACK_EXT_INT );
2944 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PAT_MSR );
2945 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PAT_MSR );
2946 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_EFER_MSR );
2947 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_EFER_MSR );
2948 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER );
2949 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR );
2950 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT );
2951 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR );
2952 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_CET_STATE );
2953 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PKRS_MSR );
2954 }
2955}
2956#endif
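
/*
 * HMVMX_LOGREL_FEAT above is only handed the control value and the feature
 * mask, so a plausible shape for such a helper is to test the bit and report
 * the feature name to the release log.  This is an assumed sketch under a
 * different name, not the actual macro definition used by this file:
 */
#if 0 /* illustrative sketch only, not built */
# define HMVMX_LOGREL_FEAT_EXAMPLE(a_uVal, a_fFeat) \
    do { LogRel(("HM:   %-40s %s\n", #a_fFeat, ((a_uVal) & (a_fFeat)) ? "enabled" : "disabled")); } while (0)
#endif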
2957
2958
2959/**
2960 * Checks a fatal VT-x/AMD-V error and produces a meaningful
2961 * release log message.
2962 *
2963 * @param pVM The cross context VM structure.
2964 * @param iStatusCode VBox status code.
2965 */
2966VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
2967{
2968 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2969 {
2970 /** @todo r=ramshankar: Are all EMTs out of ring-0 at this point!? If not, we
2971 * might be getting inaccurate values for non-guru'ing EMTs. */
2972 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2973#ifdef TODO_9217_VMCSINFO
2974 PCVMXVMCSINFOSHARED pVmcsInfo = hmGetVmxActiveVmcsInfoShared(pVCpu);
2975#endif
2976 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
2977 switch (iStatusCode)
2978 {
2979 case VERR_VMX_INVALID_VMCS_PTR:
2980 {
2981 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
2982 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
2983#ifdef TODO_9217_VMCSINFO
2984 LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", idCpu, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
2985 pVmcsInfo->HCPhysVmcs));
2986#endif
2987 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
2988 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
2989 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
2990 break;
2991 }
2992
2993 case VERR_VMX_UNABLE_TO_START_VM:
2994 {
2995 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
2996 LogRel(("HM: CPU[%u] %s VMCS active\n", idCpu, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
2997 LogRel(("HM: CPU[%u] Instruction error %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32InstrError));
2998 LogRel(("HM: CPU[%u] Exit reason %#x\n", idCpu, pVCpu->hm.s.vmx.LastError.u32ExitReason));
2999
3000 if ( pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
3001 || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
3002 {
3003 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3004 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", idCpu, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3005 }
3006 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
3007 {
3008#ifdef TODO_9217_VMCSINFO
3009 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3010 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", idCpu, pVmcsInfo->HCPhysMsrBitmap));
3011 LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrLoad));
3012 LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", idCpu, pVmcsInfo->HCPhysGuestMsrStore));
3013 LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", idCpu, pVmcsInfo->HCPhysHostMsrLoad));
3014 LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", idCpu, pVmcsInfo->cEntryMsrLoad));
3015 LogRel(("HM: CPU[%u] cExitMsrStore %u\n", idCpu, pVmcsInfo->cExitMsrStore));
3016 LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", idCpu, pVmcsInfo->cExitMsrLoad));
3017#endif
3018 }
3019 /** @todo Log VM-entry event injection control fields
3020 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
3021 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
3022 break;
3023 }
3024
3025 case VERR_VMX_INVALID_GUEST_STATE:
3026 {
3027 LogRel(("HM: VERR_VMX_INVALID_GUEST_STATE:\n"));
3028 LogRel(("HM: CPU[%u] HM error = %#RX32\n", idCpu, pVCpu->hm.s.u32HMError));
3029 LogRel(("HM: CPU[%u] Guest-intr. state = %#RX32\n", idCpu, pVCpu->hm.s.vmx.LastError.u32GuestIntrState));
3030#ifdef TODO_9217_VMCSINFO
3031 hmR3CheckErrorLogVmcsCtls(idCpu, pVmcsInfo);
3032#endif
3033 break;
3034 }
3035
3036 /* The guru will dump the HM error and exit history. Nothing extra to report for these errors. */
3037 case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
3038 case VERR_VMX_INVALID_VMXON_PTR:
3039 case VERR_VMX_UNEXPECTED_EXIT:
3040 case VERR_VMX_INVALID_VMCS_FIELD:
3041 case VERR_SVM_UNKNOWN_EXIT:
3042 case VERR_SVM_UNEXPECTED_EXIT:
3043 case VERR_SVM_UNEXPECTED_PATCH_TYPE:
3044 case VERR_SVM_UNEXPECTED_XCPT_EXIT:
3045 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
3046 break;
3047 }
3048 }
3049
3050 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
3051 {
3052 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed1));
3053 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed0));
3054 }
3055 else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
3056 LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError));
3057}
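
/*
 * A minimal sketch of the kind of failure path that ends up here: ring-0
 * returns one of the fatal status codes and ring-3 asks HM to decode it before
 * raising the guru meditation.  The surrounding function is illustrative, not
 * the actual EM/VMM error path:
 */
#if 0 /* illustrative sketch only, not built */
static int exampleHandleFatalHmStatus(PVM pVM, int rcRing0)
{
    if (   rcRing0 == VERR_VMX_UNABLE_TO_START_VM
        || rcRing0 == VERR_VMX_INVALID_VMCS_PTR
        || rcRing0 == VERR_VMX_INVALID_GUEST_STATE)
        HMR3CheckError(pVM, rcRing0); /* Dumps per-VCPU diagnostics to the release log. */
    return rcRing0; /* Let the caller proceed with the guru meditation. */
}
#endif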
3058
3059
3060/**
3061 * Execute state save operation.
3062 *
3063 * Save only data that cannot be re-loaded while entering HM ring-0 code. This
3064 * is because we always save the VM state from ring-3 and thus most HM state
3065 * will be re-synced dynamically at runtime and doesn't need to be part of the VM
3066 * saved state.
3067 *
3068 * @returns VBox status code.
3069 * @param pVM The cross context VM structure.
3070 * @param pSSM SSM operation handle.
3071 */
3072static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3073{
3074 Log(("hmR3Save:\n"));
3075
3076 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3077 {
3078 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3079 Assert(!pVCpu->hm.s.Event.fPending);
3080 if (pVM->cpum.ro.GuestFeatures.fSvm)
3081 {
3082 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3083 SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
3084 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
3085 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
3086 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
3087 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
3088 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
3089 SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
3090 SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
3091 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
3092 SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
3093 SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
3094 SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
3095 SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
3096 }
3097 }
3098
3099 /* Save the guest patch data. */
3100 SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3101 SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3102 SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3103
3104 /* Store all the guest patch records too. */
3105 int rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3106 if (RT_FAILURE(rc))
3107 return rc;
3108
3109 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3110 {
3111 AssertCompileSize(HMTPRINSTR, 4);
3112 PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3113 SSMR3PutU32(pSSM, pPatch->Core.Key);
3114 SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3115 SSMR3PutU32(pSSM, pPatch->cbOp);
3116 SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3117 SSMR3PutU32(pSSM, pPatch->cbNewOp);
3118 SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
3119 SSMR3PutU32(pSSM, pPatch->uSrcOperand);
3120 SSMR3PutU32(pSSM, pPatch->uDstOperand);
3121 SSMR3PutU32(pSSM, pPatch->pJumpTarget);
3122 rc = SSMR3PutU32(pSSM, pPatch->cFaults);
3123 if (RT_FAILURE(rc))
3124 return rc;
3125 }
3126
3127 return VINF_SUCCESS;
3128}
3129
3130
3131/**
3132 * Execute state load operation.
3133 *
3134 * @returns VBox status code.
3135 * @param pVM The cross context VM structure.
3136 * @param pSSM SSM operation handle.
3137 * @param uVersion Data layout version.
3138 * @param uPass The data pass.
3139 */
3140static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3141{
3142 int rc;
3143
3144 LogFlowFunc(("uVersion=%u\n", uVersion));
3145 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
3146
3147 /*
3148 * Validate version.
3149 */
3150 if ( uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
3151 && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
3152 && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
3153 && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
3154 {
3155 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
3156 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3157 }
3158
3159 /*
3160 * Load per-VCPU state.
3161 */
3162 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3163 {
3164 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3165 if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
3166 {
3167 /* Load the SVM nested hw.virt state if the VM is configured for it. */
3168 if (pVM->cpum.ro.GuestFeatures.fSvm)
3169 {
3170 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3171 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
3172 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
3173 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
3174 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
3175 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
3176 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
3177 SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
3178 SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
3179 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
3180 SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
3181 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
3182 SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
3183 rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
3184 AssertRCReturn(rc, rc);
3185 }
3186 }
3187 else
3188 {
3189 /* Pending HM event (obsolete for a long time since TRPM holds the info.) */
3190 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.fPending);
3191 SSMR3GetU32(pSSM, &pVCpu->hm.s.Event.u32ErrCode);
3192 SSMR3GetU64(pSSM, &pVCpu->hm.s.Event.u64IntInfo);
3193
3194 /* VMX fWasInRealMode related data. */
3195 uint32_t uDummy;
3196 SSMR3GetU32(pSSM, &uDummy);
3197 SSMR3GetU32(pSSM, &uDummy);
3198 rc = SSMR3GetU32(pSSM, &uDummy);
3199 AssertRCReturn(rc, rc);
3200 }
3201 }
3202
3203 /*
3204 * Load TPR patching data.
3205 */
3206 if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
3207 {
3208 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3209 SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3210 SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3211
3212 /* Fetch all TPR patch records. */
3213 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3214 AssertRCReturn(rc, rc);
3215 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3216 {
3217 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3218 SSMR3GetU32(pSSM, &pPatch->Core.Key);
3219 SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3220 SSMR3GetU32(pSSM, &pPatch->cbOp);
3221 SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3222 SSMR3GetU32(pSSM, &pPatch->cbNewOp);
3223 SSM_GET_ENUM32_RET(pSSM, pPatch->enmType, HMTPRINSTR);
3224
3225 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
3226 pVM->hm.s.fTprPatchingActive = true;
3227 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTprPatchingActive == false);
3228
3229 SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
3230 SSMR3GetU32(pSSM, &pPatch->uDstOperand);
3231 SSMR3GetU32(pSSM, &pPatch->cFaults);
3232 rc = SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
3233 AssertRCReturn(rc, rc);
3234
3235 LogFlow(("hmR3Load: patch %d\n", i));
3236 LogFlow(("Key = %x\n", pPatch->Core.Key));
3237 LogFlow(("cbOp = %d\n", pPatch->cbOp));
3238 LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
3239 LogFlow(("type = %d\n", pPatch->enmType));
3240 LogFlow(("srcop = %d\n", pPatch->uSrcOperand));
3241 LogFlow(("dstop = %d\n", pPatch->uDstOperand));
3242 LogFlow(("cFaults = %d\n", pPatch->cFaults));
3243 LogFlow(("target = %x\n", pPatch->pJumpTarget));
3244
3245 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
3246 AssertRCReturn(rc, rc);
3247 }
3248 }
3249
3250 return VINF_SUCCESS;
3251}
3252
3253
3254/**
3255 * Displays HM info.
3256 *
3257 * @param pVM The cross context VM structure.
3258 * @param pHlp The info helper functions.
3259 * @param pszArgs Arguments, ignored.
3260 */
3261static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3262{
3263 NOREF(pszArgs);
3264 PVMCPU pVCpu = VMMGetCpu(pVM);
3265 if (!pVCpu)
3266 pVCpu = pVM->apCpusR3[0];
3267
3268 if (HMIsEnabled(pVM))
3269 {
3270 if (pVM->hm.s.vmx.fSupported)
3271 pHlp->pfnPrintf(pHlp, "CPU[%u]: VT-x info:\n", pVCpu->idCpu);
3272 else
3273 pHlp->pfnPrintf(pHlp, "CPU[%u]: AMD-V info:\n", pVCpu->idCpu);
3274 pHlp->pfnPrintf(pHlp, " HM error = %#x (%u)\n", pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError);
3275 pHlp->pfnPrintf(pHlp, " rcLastExitToR3 = %Rrc\n", pVCpu->hm.s.rcLastExitToR3);
3276 if (pVM->hm.s.vmx.fSupported)
3277 {
3278 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3279 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
3280 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3;
3281
3282 pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
3283 pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active);
3284 if (fRealOnV86Active)
3285 {
3286 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfoShared->RealMode.Eflags.u32);
3287 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfoShared->RealMode.AttrCS.u);
3288 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfoShared->RealMode.AttrSS.u);
3289 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfoShared->RealMode.AttrDS.u);
3290 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfoShared->RealMode.AttrES.u);
3291 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfoShared->RealMode.AttrFS.u);
3292 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfoShared->RealMode.AttrGS.u);
3293 }
3294 }
3295 }
3296 else
3297 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3298}
3299
3300
3301/**
3302 * Displays the HM Last-Branch-Record info. for the guest.
3303 *
3304 * @param pVM The cross context VM structure.
3305 * @param pHlp The info helper functions.
3306 * @param pszArgs Arguments, ignored.
3307 */
3308static DECLCALLBACK(void) hmR3InfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3309{
3310 NOREF(pszArgs);
3311 PVMCPU pVCpu = VMMGetCpu(pVM);
3312 if (!pVCpu)
3313 pVCpu = pVM->apCpusR3[0];
3314
3315 if (!HMIsEnabled(pVM))
3316 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3317 else if (HMIsVmxActive(pVM))
3318 {
3319 if (pVM->hm.s.vmx.fLbrCfg)
3320 {
3321 PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
3322 uint32_t const cLbrStack = pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast - pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst + 1;
3323
3324 /** @todo r=ramshankar: The index technically varies depending on the CPU, but
3325 * 0xf should cover everything we support thus far. Fix if necessary
3326 * later. */
3327 uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
3328            if (idxTopOfStack >= cLbrStack)
3329 {
3330 pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
3331 idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
3332 return;
3333 }
3334
3335 /*
3336 * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
3337 */
3338 pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
3339 uint32_t idxCurrent = idxTopOfStack;
3340 Assert(idxTopOfStack < cLbrStack);
3341 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
3342 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
3343 for (;;)
3344 {
3345 if (pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst)
3346 pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
3347 pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
3348 else
3349 pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
3350
3351 idxCurrent = (idxCurrent - 1) % cLbrStack;
3352 if (idxCurrent == idxTopOfStack)
3353 break;
3354 }
3355 }
3356 else
3357 pHlp->pfnPrintf(pHlp, "VM not configured to record LBRs for the guest\n");
3358 }
3359 else
3360 {
3361 Assert(HMIsSvmActive(pVM));
3362 /** @todo SVM: LBRs (get them from VMCB if possible). */
3363 pHlp->pfnPrintf(pHlp, "SVM LBR not implemented.\n");
3364 }
3365}
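
/*
 * The dump loop above walks the LBR stack as a circular buffer, newest entry
 * first, relying on unsigned wrap-around in "(idxCurrent - 1) % cLbrStack".
 * Note that (0u - 1u) % cLbrStack only yields cLbrStack - 1 when cLbrStack is
 * a power of two, which holds for the LBR stack depths we expect (e.g. 8, 16
 * or 32 entries).  A tiny stand-alone illustration of the same walk:
 */
#if 0 /* illustrative sketch only, not built */
static void exampleWalkLbrStack(uint64_t const *pau64From, uint32_t cLbrStack, uint32_t idxTopOfStack)
{
    uint32_t idxCurrent = idxTopOfStack;
    for (;;)
    {
        LogRel(("LBR %2u: %#RX64\n", idxCurrent, pau64From[idxCurrent]));
        idxCurrent = (idxCurrent - 1) % cLbrStack;  /* Wraps from 0 back to cLbrStack - 1 for power-of-two sizes. */
        if (idxCurrent == idxTopOfStack)            /* Stop once we are back at the most recent entry. */
            break;
    }
}
#endif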
3366
3367
3368/**
3369 * Displays the HM pending event.
3370 *
3371 * @param pVM The cross context VM structure.
3372 * @param pHlp The info helper functions.
3373 * @param pszArgs Arguments, ignored.
3374 */
3375static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3376{
3377 NOREF(pszArgs);
3378 PVMCPU pVCpu = VMMGetCpu(pVM);
3379 if (!pVCpu)
3380 pVCpu = pVM->apCpusR3[0];
3381
3382 if (HMIsEnabled(pVM))
3383 {
3384 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM event (fPending=%RTbool)\n", pVCpu->idCpu, pVCpu->hm.s.Event.fPending);
3385 if (pVCpu->hm.s.Event.fPending)
3386 {
3387 pHlp->pfnPrintf(pHlp, " u64IntInfo = %#RX64\n", pVCpu->hm.s.Event.u64IntInfo);
3388 pHlp->pfnPrintf(pHlp, " u32ErrCode = %#RX64\n", pVCpu->hm.s.Event.u32ErrCode);
3389 pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", pVCpu->hm.s.Event.cbInstr);
3390 pHlp->pfnPrintf(pHlp, " GCPtrFaultAddress = %#RGp\n", pVCpu->hm.s.Event.GCPtrFaultAddress);
3391 }
3392 }
3393 else
3394 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3395}
3396
3397
3398/**
3399 * Displays the SVM nested-guest VMCB cache.
3400 *
3401 * @param pVM The cross context VM structure.
3402 * @param pHlp The info helper functions.
3403 * @param pszArgs Arguments, ignored.
3404 */
3405static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3406{
3407 NOREF(pszArgs);
3408 PVMCPU pVCpu = VMMGetCpu(pVM);
3409 if (!pVCpu)
3410 pVCpu = pVM->apCpusR3[0];
3411
3412 bool const fSvmEnabled = HMR3IsSvmEnabled(pVM->pUVM);
3413 if ( fSvmEnabled
3414 && pVM->cpum.ro.GuestFeatures.fSvm)
3415 {
3416 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3417 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
3418 pHlp->pfnPrintf(pHlp, " fCacheValid = %#RTbool\n", pVmcbNstGstCache->fCacheValid);
3419 pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx);
3420 pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx);
3421 pHlp->pfnPrintf(pHlp, " u16InterceptRdDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdDRx);
3422 pHlp->pfnPrintf(pHlp, " u16InterceptWrDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrDRx);
3423 pHlp->pfnPrintf(pHlp, " u16PauseFilterThreshold = %#RX16\n", pVmcbNstGstCache->u16PauseFilterThreshold);
3424 pHlp->pfnPrintf(pHlp, " u16PauseFilterCount = %#RX16\n", pVmcbNstGstCache->u16PauseFilterCount);
3425 pHlp->pfnPrintf(pHlp, " u32InterceptXcpt = %#RX32\n", pVmcbNstGstCache->u32InterceptXcpt);
3426 pHlp->pfnPrintf(pHlp, " u64InterceptCtrl = %#RX64\n", pVmcbNstGstCache->u64InterceptCtrl);
3427 pHlp->pfnPrintf(pHlp, " u64TSCOffset = %#RX64\n", pVmcbNstGstCache->u64TSCOffset);
3428 pHlp->pfnPrintf(pHlp, " fVIntrMasking = %RTbool\n", pVmcbNstGstCache->fVIntrMasking);
3429 pHlp->pfnPrintf(pHlp, " fNestedPaging = %RTbool\n", pVmcbNstGstCache->fNestedPaging);
3430 pHlp->pfnPrintf(pHlp, " fLbrVirt = %RTbool\n", pVmcbNstGstCache->fLbrVirt);
3431 }
3432 else
3433 {
3434 if (!fSvmEnabled)
3435 pHlp->pfnPrintf(pHlp, "HM SVM is not enabled for this VM!\n");
3436 else
3437 pHlp->pfnPrintf(pHlp, "SVM feature is not exposed to the guest!\n");
3438 }
3439}
3440