VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/HM.cpp@79512

Last change on this file since 79512 was 79512, checked in by vboxsync, 5 years ago

VMM/HM: Nested VMX: bugref:9180 LogFlow nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 152.0 KB
1/* $Id: HM.cpp 79512 2019-07-04 05:56:32Z vboxsync $ */
2/** @file
3 * HM - Intel/AMD VM Hardware Support Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_hm HM - Hardware Assisted Virtualization Manager
19 *
20 * The HM manages guest execution using the VT-x and AMD-V CPU hardware
21 * extensions.
22 *
23 * {summary of what HM does}
24 *
25 * The hardware-assisted virtualization manager was originally abbreviated
26 * HWACCM; however, that was cumbersome to write and parse for such a central
27 * component, so it was shortened to HM when refactoring the code in the 4.3
28 * development cycle.
29 *
30 * {add sections with more details}
31 *
32 * @sa @ref grp_hm
33 */
34
35
36/*********************************************************************************************************************************
37* Header Files *
38*********************************************************************************************************************************/
39#define LOG_GROUP LOG_GROUP_HM
40#define VMCPU_INCL_CPUM_GST_CTX
41#include <VBox/vmm/cpum.h>
42#include <VBox/vmm/stam.h>
43#include <VBox/vmm/mm.h>
44#include <VBox/vmm/em.h>
45#include <VBox/vmm/pdmapi.h>
46#include <VBox/vmm/pgm.h>
47#include <VBox/vmm/ssm.h>
48#include <VBox/vmm/gim.h>
49#include <VBox/vmm/trpm.h>
50#include <VBox/vmm/dbgf.h>
51#include <VBox/vmm/iom.h>
52#include <VBox/vmm/iem.h>
53#include <VBox/vmm/patm.h>
54#include <VBox/vmm/csam.h>
55#include <VBox/vmm/selm.h>
56#include <VBox/vmm/nem.h>
57#ifdef VBOX_WITH_REM
58# include <VBox/vmm/rem.h>
59#endif
60#include <VBox/vmm/hm_vmx.h>
61#include <VBox/vmm/hm_svm.h>
62#include "HMInternal.h"
63#include <VBox/vmm/vm.h>
64#include <VBox/vmm/uvm.h>
65#include <VBox/err.h>
66#include <VBox/param.h>
67
68#include <iprt/assert.h>
69#include <VBox/log.h>
70#include <iprt/asm.h>
71#include <iprt/asm-amd64-x86.h>
72#include <iprt/env.h>
73#include <iprt/thread.h>
74
75
76/*********************************************************************************************************************************
77* Defined Constants And Macros *
78*********************************************************************************************************************************/
79/** @def HMVMX_REPORT_FEAT
80 * Reports a VT-x feature to the release log: a bit set in the allowed-1
81 * mask can be enabled, and one that is also set in the allowed-0 mask must be set.
82 * @param a_uAllowed1 Mask of allowed-1 feature bits.
83 * @param a_uAllowed0 Mask of allowed-0 feature bits.
84 * @param a_StrDesc The description string to report.
85 * @param a_Featflag Mask of the feature to report.
86 */
87#define HMVMX_REPORT_FEAT(a_uAllowed1, a_uAllowed0, a_StrDesc, a_Featflag) \
88 do { \
89 if ((a_uAllowed1) & (a_Featflag)) \
90 { \
91 if ((a_uAllowed0) & (a_Featflag)) \
92 LogRel(("HM: " a_StrDesc " (must be set)\n")); \
93 else \
94 LogRel(("HM: " a_StrDesc "\n")); \
95 } \
96 else \
97 LogRel(("HM: " a_StrDesc " (must be cleared)\n")); \
98 } while (0)
99
100/** @def HMVMX_REPORT_ALLOWED_FEAT
101 * Reports an allowed VT-x feature to the release log.
102 *
103 * @param a_uAllowed1 Mask of allowed-1 feature bits.
104 * @param a_StrDesc The description string to report.
105 * @param a_FeatFlag Mask of the feature to report.
106 */
107#define HMVMX_REPORT_ALLOWED_FEAT(a_uAllowed1, a_StrDesc, a_FeatFlag) \
108 do { \
109 if ((a_uAllowed1) & (a_FeatFlag)) \
110 LogRel(("HM: " a_StrDesc "\n")); \
111 else \
112 LogRel(("HM: " a_StrDesc " not supported\n")); \
113 } while (0)
114
115/** @def HMVMX_REPORT_MSR_CAP
116 * Reports MSR feature capability.
117 *
118 * @param a_MsrCaps Mask of MSR feature bits.
119 * @param a_StrDesc The description string to report.
120 * @param a_fCap Mask of the feature to report.
121 */
122#define HMVMX_REPORT_MSR_CAP(a_MsrCaps, a_StrDesc, a_fCap) \
123 do { \
124 if ((a_MsrCaps) & (a_fCap)) \
125 LogRel(("HM: " a_StrDesc "\n")); \
126 } while (0)
127
128/** @def HMVMX_LOGREL_FEAT
129 * Dumps a feature flag from a bitmap of features to the release log.
130 *
131 * @param a_fVal The value of all the features.
132 * @param a_fMask The specific bitmask of the feature.
133 */
134#define HMVMX_LOGREL_FEAT(a_fVal, a_fMask) \
135 do { \
136 if ((a_fVal) & (a_fMask)) \
137 LogRel(("HM: %s\n", #a_fMask)); \
138 } while (0)
139
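/*
 * Editorial example (not part of the original source): typical use of the
 * report macros above, mirroring the reporting functions later in this file.
 * pVmxMsr and uFeatCtrlMsr stand in for the locals those functions use.
 */
#if 0 /* illustrative only */
uint64_t const fAllowed1 = pVmxMsr->n.allowed1; /* bits that may be set */
uint64_t const fAllowed0 = pVmxMsr->n.allowed0; /* bits that must be set */
HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
HMVMX_REPORT_MSR_CAP(uFeatCtrlMsr, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
#endif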
140
141/*********************************************************************************************************************************
142* Internal Functions *
143*********************************************************************************************************************************/
144static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
145static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
146static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
147static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
148static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
149static int hmR3InitFinalizeR3(PVM pVM);
150static int hmR3InitFinalizeR0(PVM pVM);
151static int hmR3InitFinalizeR0Intel(PVM pVM);
152static int hmR3InitFinalizeR0Amd(PVM pVM);
153static int hmR3TermCPU(PVM pVM);
154
155
156
157/**
158 * Initializes the HM.
159 *
160 * This is the very first component to really do init after CFGM so that we can
161 * establish the predominant execution engine for the VM prior to initializing
162 * other modules. It takes care of NEM initialization if needed (HM disabled or
163 * not available in HW).
164 *
165 * If VT-x or AMD-V hardware isn't available, HM will try to fall back on a
166 * native hypervisor API via NEM, and then on raw-mode if that isn't
167 * available either. The fallback to raw-mode will not happen if /HM/HMForced
168 * is set (as it is for guests using SMP or 64-bit mode, as well as for
169 * complicated guests like OS X, OS/2 and others).
170 *
171 * Note that a lot of the setup work is done in ring-0 and thus postponed
172 * until the ring-3 and ring-0 callbacks to HMR3InitCompleted.
173 *
174 * @returns VBox status code.
175 * @param pVM The cross context VM structure.
176 *
177 * @remarks Be careful with what we call here, since most of the VMM components
178 * are uninitialized.
179 */
180VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
181{
182 LogFlowFunc(("\n"));
183
184 /*
185 * Assert alignment and sizes.
186 */
187 AssertCompileMemberAlignment(VM, hm.s, 32);
188 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
189
190 /*
191 * Register the saved state data unit.
192 */
193 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
194 NULL, NULL, NULL,
195 NULL, hmR3Save, NULL,
196 NULL, hmR3Load, NULL);
197 if (RT_FAILURE(rc))
198 return rc;
199
200 /*
201 * Register info handlers.
202 */
203 rc = DBGFR3InfoRegisterInternalEx(pVM, "hm", "Dumps HM info.", hmR3Info, DBGFINFO_FLAGS_ALL_EMTS);
204 AssertRCReturn(rc, rc);
205
206 rc = DBGFR3InfoRegisterInternalEx(pVM, "hmeventpending", "Dumps the pending HM event.", hmR3InfoEventPending,
207 DBGFINFO_FLAGS_ALL_EMTS);
208 AssertRCReturn(rc, rc);
209
210 rc = DBGFR3InfoRegisterInternalEx(pVM, "svmvmcbcache", "Dumps the HM SVM nested-guest VMCB cache.",
211 hmR3InfoSvmNstGstVmcbCache, DBGFINFO_FLAGS_ALL_EMTS);
212 AssertRCReturn(rc, rc);
213
214 /*
215 * Read configuration.
216 */
217 PCFGMNODE pCfgHm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
218
219 /*
220 * Validate the HM settings.
221 */
222 rc = CFGMR3ValidateConfig(pCfgHm, "/HM/",
223 "HMForced"
224 "|UseNEMInstead"
225 "|FallbackToNEM"
226 "|EnableNestedPaging"
227 "|EnableUX"
228 "|EnableLargePages"
229 "|EnableVPID"
230 "|IBPBOnVMExit"
231 "|IBPBOnVMEntry"
232 "|SpecCtrlByHost"
233 "|L1DFlushOnSched"
234 "|L1DFlushOnVMEntry"
235 "|MDSClearOnSched"
236 "|MDSClearOnVMEntry"
237 "|TPRPatchingEnabled"
238 "|64bitEnabled"
239 "|Exclusive"
240 "|MaxResumeLoops"
241 "|VmxPleGap"
242 "|VmxPleWindow"
243 "|UseVmxPreemptTimer"
244 "|SvmPauseFilter"
245 "|SvmPauseFilterThreshold"
246 "|SvmVirtVmsaveVmload"
247 "|SvmVGif"
248 "|LovelyMesaDrvWorkaround",
249 "" /* pszValidNodes */, "HM" /* pszWho */, 0 /* uInstance */);
250 if (RT_FAILURE(rc))
251 return rc;
252
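/*
 * Editorial note (not part of the original source): these keys live under
 * the VM's CFGM tree and are typically set from the host side as extra
 * data, e.g.:
 *   VBoxManage setextradata "MyVM" "VBoxInternal/HM/UseVmxPreemptTimer" 0
 * ("MyVM" is a placeholder VM name). Keys not in the list above are
 * rejected by CFGMR3ValidateConfig().
 */
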
253 /** @cfgm{/HM/HMForced, bool, false}
254 * Forces hardware virtualization, no falling back on raw-mode. HM must be
255 * enabled, i.e. /HMEnabled must be true. */
256 bool fHMForced;
257#ifdef VBOX_WITH_RAW_MODE
258 rc = CFGMR3QueryBoolDef(pCfgHm, "HMForced", &fHMForced, false);
259 AssertRCReturn(rc, rc);
260 AssertLogRelMsgReturn(!fHMForced || pVM->fHMEnabled, ("Configuration error: HM forced but not enabled!\n"),
261 VERR_INVALID_PARAMETER);
262# if defined(RT_OS_DARWIN)
263 if (pVM->fHMEnabled)
264 fHMForced = true;
265# endif
266 AssertLogRelMsgReturn(pVM->cCpus == 1 || pVM->fHMEnabled, ("Configuration error: SMP requires HM to be enabled!\n"),
267 VERR_INVALID_PARAMETER);
268 if (pVM->cCpus > 1)
269 fHMForced = true;
270#else /* !VBOX_WITH_RAW_MODE */
271 AssertRelease(pVM->fHMEnabled);
272 fHMForced = true;
273#endif /* !VBOX_WITH_RAW_MODE */
274
275 /** @cfgm{/HM/UseNEMInstead, bool, false}
276 * Don't use HM, use NEM instead. */
277 bool fUseNEMInstead = false;
278 rc = CFGMR3QueryBoolDef(pCfgHm, "UseNEMInstead", &fUseNEMInstead, false);
279 AssertRCReturn(rc, rc);
280 if (fUseNEMInstead && pVM->fHMEnabled)
281 {
282 LogRel(("HM: Setting fHMEnabled to false because fUseNEMInstead is set.\n"));
283 pVM->fHMEnabled = false;
284 }
285
286 /** @cfgm{/HM/FallbackToNEM, bool, true}
287 * Enables falling back to NEM. */
288 bool fFallbackToNEM = true;
289 rc = CFGMR3QueryBoolDef(pCfgHm, "FallbackToNEM", &fFallbackToNEM, true);
290 AssertRCReturn(rc, rc);
291
292 /** @cfgm{/HM/EnableNestedPaging, bool, false}
293 * Enables nested paging (aka extended page tables). */
294 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false);
295 AssertRCReturn(rc, rc);
296
297 /** @cfgm{/HM/EnableUX, bool, true}
298 * Enables the VT-x unrestricted execution feature. */
299 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableUX", &pVM->hm.s.vmx.fAllowUnrestricted, true);
300 AssertRCReturn(rc, rc);
301
302 /** @cfgm{/HM/EnableLargePages, bool, false}
303 * Enables using large pages (2 MB) for guest memory, thus saving on (nested)
304 * page-table walking and possibly improving the TLB hit rate in some cases. */
305 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableLargePages", &pVM->hm.s.fLargePages, false);
306 AssertRCReturn(rc, rc);
307
308 /** @cfgm{/HM/EnableVPID, bool, false}
309 * Enables the VT-x VPID feature. */
310 rc = CFGMR3QueryBoolDef(pCfgHm, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
311 AssertRCReturn(rc, rc);
312
313 /** @cfgm{/HM/TPRPatchingEnabled, bool, false}
314 * Enables TPR patching for 32-bit Windows guests with an I/O APIC. */
315 rc = CFGMR3QueryBoolDef(pCfgHm, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
316 AssertRCReturn(rc, rc);
317
318 /** @cfgm{/HM/64bitEnabled, bool, 32-bit:false, 64-bit:true}
319 * Enables AMD64 CPU features.
320 * On 32-bit hosts this isn't the default and requires host CPU support;
321 * 64-bit hosts already have the support. */
322#ifdef VBOX_ENABLE_64_BITS_GUESTS
323 rc = CFGMR3QueryBoolDef(pCfgHm, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, HC_ARCH_BITS == 64);
324 AssertLogRelRCReturn(rc, rc);
325#else
326 pVM->hm.s.fAllow64BitGuests = false;
327#endif
328
329 /** @cfgm{/HM/VmxPleGap, uint32_t, 0}
330 * The pause-filter exiting gap in TSC ticks. When the number of ticks between
331 * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
332 * latest PAUSE instruction to be the start of a new PAUSE loop.
333 */
334 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleGap", &pVM->hm.s.vmx.cPleGapTicks, 0);
335 AssertRCReturn(rc, rc);
336
337 /** @cfgm{/HM/VmxPleWindow, uint32_t, 0}
338 * The pause-filter exiting window in TSC ticks. When the number of ticks
339 * between the current PAUSE instruction and first PAUSE of a loop exceeds
340 * VmxPleWindow, a VM-exit is triggered.
341 *
342 * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
343 */
344 rc = CFGMR3QueryU32Def(pCfgHm, "VmxPleWindow", &pVM->hm.s.vmx.cPleWindowTicks, 0);
345 AssertRCReturn(rc, rc);
346
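/*
 * Worked example (editorial): with VmxPleGap=128 and VmxPleWindow=4096, two
 * PAUSEs fewer than 128 TSC ticks apart are treated as part of the same spin
 * loop, and once more than 4096 ticks have elapsed since the loop's first
 * PAUSE the CPU forces a VM-exit so another vCPU can be scheduled.
 */
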
347 /** @cfgm{/HM/SvmPauseFilter, uint16_t, 0}
348 * A counter that is decremented each time a PAUSE instruction is executed
349 * by the guest. When the counter reaches 0, a \#VMEXIT is triggered.
350 *
351 * Setting SvmPauseFilter to 0 disables pause-filter exiting.
352 */
353 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilter", &pVM->hm.s.svm.cPauseFilter, 0);
354 AssertRCReturn(rc, rc);
355
356 /** @cfgm{/HM/SvmPauseFilterThreshold, uint16_t, 0}
357 * The pause filter threshold in ticks. When the elapsed time (in ticks) between
358 * two successive PAUSE instructions exceeds SvmPauseFilterThreshold, the
359 * PauseFilter count is reset to its initial value. However, if PAUSE is
360 * executed PauseFilter times within PauseFilterThreshold ticks, a VM-exit will
361 * be triggered.
362 *
363 * Requires SvmPauseFilter to be non-zero for the pause-filter threshold to
364 * be activated.
365 */
366 rc = CFGMR3QueryU16Def(pCfgHm, "SvmPauseFilterThreshold", &pVM->hm.s.svm.cPauseFilterThresholdTicks, 0);
367 AssertRCReturn(rc, rc);
368
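/*
 * Worked example (editorial): with SvmPauseFilter=64 and
 * SvmPauseFilterThreshold=512, PAUSEs spaced more than 512 ticks apart keep
 * resetting the counter to 64, while 64 tightly-spaced PAUSEs exhaust it and
 * trigger a \#VMEXIT.
 */
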
369 /** @cfgm{/HM/SvmVirtVmsaveVmload, bool, true}
370 * Whether to make use of virtualized VMSAVE/VMLOAD feature of the CPU if it's
371 * available. */
372 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVirtVmsaveVmload", &pVM->hm.s.svm.fVirtVmsaveVmload, true);
373 AssertRCReturn(rc, rc);
374
375 /** @cfgm{/HM/SvmVGif, bool, true}
376 * Whether to make use of Virtual GIF (Global Interrupt Flag) feature of the CPU
377 * if it's available. */
378 rc = CFGMR3QueryBoolDef(pCfgHm, "SvmVGif", &pVM->hm.s.svm.fVGif, true);
379 AssertRCReturn(rc, rc);
380
381 /** @cfgm{/HM/Exclusive, bool}
382 * Determines the init method for AMD-V and VT-x. If set to true, HM will do a
383 * global init for each host CPU. If false, we do local init each time we wish
384 * to execute guest code.
385 *
386 * On Windows, default is false due to the higher risk of conflicts with other
387 * hypervisors.
388 *
389 * On Mac OS X, this setting is ignored since the code does not handle local
390 * init when it utilizes the OS provided VT-x function, SUPR0EnableVTx().
391 */
392#if defined(RT_OS_DARWIN)
393 pVM->hm.s.fGlobalInit = true;
394#else
395 rc = CFGMR3QueryBoolDef(pCfgHm, "Exclusive", &pVM->hm.s.fGlobalInit,
396# if defined(RT_OS_WINDOWS)
397 false
398# else
399 true
400# endif
401 );
402 AssertLogRelRCReturn(rc, rc);
403#endif
404
405 /** @cfgm{/HM/MaxResumeLoops, uint32_t}
406 * The number of times to resume guest execution before we forcibly return to
407 * ring-3. The return value of RTThreadPreemptIsPendingTrusty in ring-0
408 * determines the default value. */
409 rc = CFGMR3QueryU32Def(pCfgHm, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */);
410 AssertLogRelRCReturn(rc, rc);
411
412 /** @cfgm{/HM/UseVmxPreemptTimer, bool}
413 * Whether to make use of the VMX-preemption timer feature of the CPU if it's
414 * available. */
415 rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
416 AssertLogRelRCReturn(rc, rc);
417
418 /** @cfgm{/HM/IBPBOnVMExit, bool}
419 * Costly paranoia setting. */
420 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
421 AssertLogRelRCReturn(rc, rc);
422
423 /** @cfgm{/HM/IBPBOnVMEntry, bool}
424 * Costly paranoia setting. */
425 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
426 AssertLogRelRCReturn(rc, rc);
427
428 /** @cfgm{/HM/L1DFlushOnSched, bool, true}
429 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
430 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
431 AssertLogRelRCReturn(rc, rc);
432
433 /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
434 * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
435 rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
436 AssertLogRelRCReturn(rc, rc);
437
438 /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
439 if (pVM->hm.s.fL1dFlushOnVmEntry)
440 pVM->hm.s.fL1dFlushOnSched = false;
441
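/*
 * Editorial note: flushing the L1D cache on every VM-entry subsumes the
 * cheaper flush-on-scheduling variant, so only one of the two is left
 * enabled; the same logic is applied to the MDS buffer clearing below.
 */
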
442 /** @cfgm{/HM/SpecCtrlByHost, bool}
443 * Another expensive paranoia setting. */
444 rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
445 AssertLogRelRCReturn(rc, rc);
446
447 /** @cfgm{/HM/MDSClearOnSched, bool, true}
448 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
449 * ignored on CPUs that aren't affected. */
450 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
451 AssertLogRelRCReturn(rc, rc);
452
453 /** @cfgm{/HM/MDSClearOnVmEntry, bool, false}
454 * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
455 * ignored on CPUs that aren't affected. */
456 rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVmEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
457 AssertLogRelRCReturn(rc, rc);
458
459 /* Disable MDSClearOnSched if MDSClearOnVmEntry is enabled. */
460 if (pVM->hm.s.fMdsClearOnVmEntry)
461 pVM->hm.s.fMdsClearOnSched = false;
462
463 /** @cfgm{/HM/LovelyMesaDrvWorkaround, bool}
464 * Workaround for the Mesa vmsvga 3D driver making incorrect assumptions
465 * about the hypervisor it is running under. */
466 bool f;
467 rc = CFGMR3QueryBoolDef(pCfgHm, "LovelyMesaDrvWorkaround", &f, false);
468 AssertLogRelRCReturn(rc, rc);
469 for (VMCPUID i = 0; i < pVM->cCpus; i++)
470 pVM->aCpus[i].hm.s.fTrapXcptGpForLovelyMesaDrv = f;
471
472 /*
473 * Check for VT-x or AMD-V support according to the user's wishes.
474 */
475 /** @todo SUPR3QueryVTCaps won't catch VERR_VMX_IN_VMX_ROOT_MODE or
476 * VERR_SVM_IN_USE. */
477 if (pVM->fHMEnabled)
478 {
479 uint32_t fCaps;
480 rc = SUPR3QueryVTCaps(&fCaps);
481 if (RT_SUCCESS(rc))
482 {
483 if (fCaps & SUPVTCAPS_AMD_V)
484 {
485 pVM->hm.s.svm.fSupported = true;
486 LogRel(("HM: HMR3Init: AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : ""));
487 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
488 }
489 else if (fCaps & SUPVTCAPS_VT_X)
490 {
491 const char *pszWhy;
492 rc = SUPR3QueryVTxSupported(&pszWhy);
493 if (RT_SUCCESS(rc))
494 {
495 pVM->hm.s.vmx.fSupported = true;
496 LogRel(("HM: HMR3Init: VT-x%s%s%s\n",
497 fCaps & SUPVTCAPS_NESTED_PAGING ? " w/ nested paging" : "",
498 fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST ? " and unrestricted guest execution" : "",
499 (fCaps & (SUPVTCAPS_NESTED_PAGING | SUPVTCAPS_VTX_UNRESTRICTED_GUEST)) ? " hw support" : ""));
500 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_HW_VIRT);
501 }
502 else
503 {
504 /*
505 * Before failing, try fallback to NEM if we're allowed to do that.
506 */
507 pVM->fHMEnabled = false;
508 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
509 if (fFallbackToNEM)
510 {
511 LogRel(("HM: HMR3Init: Attempting fall back to NEM: The host kernel does not support VT-x - %s\n", pszWhy));
512 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
513
514 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
515 if ( RT_SUCCESS(rc2)
516 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
517 rc = VINF_SUCCESS;
518 }
519 if (RT_FAILURE(rc))
520 {
521 if (fHMForced)
522 return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x: %s\n", pszWhy);
523
524 /* Fall back to raw-mode. */
525 LogRel(("HM: HMR3Init: Falling back to raw-mode: The host kernel does not support VT-x - %s\n", pszWhy));
526 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_RAW_MODE);
527 }
528 }
529 }
530 else
531 AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
532 VERR_INTERNAL_ERROR_5);
533
534 /*
535 * Do we require a little bit of raw-mode for 64-bit guest execution?
536 */
537 pVM->fHMNeedRawModeCtx = HC_ARCH_BITS == 32
538 && pVM->fHMEnabled
539 && pVM->hm.s.fAllow64BitGuests;
540
541 /*
542 * Disable nested paging and unrestricted guest execution now if they're
543 * configured so that CPUM can make decisions based on our configuration.
544 */
545 Assert(!pVM->hm.s.fNestedPaging);
546 if (pVM->hm.s.fAllowNestedPaging)
547 {
548 if (fCaps & SUPVTCAPS_NESTED_PAGING)
549 pVM->hm.s.fNestedPaging = true;
550 else
551 pVM->hm.s.fAllowNestedPaging = false;
552 }
553
554 if (fCaps & SUPVTCAPS_VT_X)
555 {
556 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
557 if (pVM->hm.s.vmx.fAllowUnrestricted)
558 {
559 if ( (fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST)
560 && pVM->hm.s.fNestedPaging)
561 pVM->hm.s.vmx.fUnrestrictedGuest = true;
562 else
563 pVM->hm.s.vmx.fAllowUnrestricted = false;
564 }
565 }
566 }
567 else
568 {
569 const char *pszMsg;
570 switch (rc)
571 {
572 case VERR_UNSUPPORTED_CPU: pszMsg = "Unknown CPU, VT-x or AMD-v features cannot be ascertained"; break;
573 case VERR_VMX_NO_VMX: pszMsg = "VT-x is not available"; break;
574 case VERR_VMX_MSR_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS"; break;
575 case VERR_VMX_MSR_ALL_VMX_DISABLED: pszMsg = "VT-x is disabled in the BIOS for all CPU modes"; break;
576 case VERR_VMX_MSR_LOCKING_FAILED: pszMsg = "Failed to enable and lock VT-x features"; break;
577 case VERR_SVM_NO_SVM: pszMsg = "AMD-V is not available"; break;
578 case VERR_SVM_DISABLED: pszMsg = "AMD-V is disabled in the BIOS (or by the host OS)"; break;
579 default:
580 return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
581 }
582
583 /*
584 * Before failing, try fallback to NEM if we're allowed to do that.
585 */
586 pVM->fHMEnabled = false;
587 if (fFallbackToNEM)
588 {
589 LogRel(("HM: HMR3Init: Attempting fall back to NEM: %s\n", pszMsg));
590 int rc2 = NEMR3Init(pVM, true /*fFallback*/, fHMForced);
591 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
592 if ( RT_SUCCESS(rc2)
593 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET)
594 rc = VINF_SUCCESS;
595 }
596 if (RT_FAILURE(rc))
597 {
598 if (fHMForced)
599 return VM_SET_ERROR(pVM, rc, pszMsg);
600
601 LogRel(("HM: HMR3Init: Falling back to raw-mode: %s\n", pszMsg));
602 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_RAW_MODE);
603 }
604 }
605 }
606 else
607 {
608 /*
609 * Disabled HM means raw-mode, unless NEM is supposed to be used.
610 */
611 if (!fUseNEMInstead)
612 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_RAW_MODE);
613 else
614 {
615 rc = NEMR3Init(pVM, false /*fFallback*/, true);
616 ASMCompilerBarrier(); /* NEMR3Init may have changed bMainExecutionEngine. */
617 if (RT_FAILURE(rc))
618 return rc;
619 }
620 }
621
622 return VINF_SUCCESS;
623}
624
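/*
 * Editorial summary (not part of the original source) of the execution
 * engine selection performed above:
 *   - HM enabled and VT-x/AMD-V usable             -> VM_EXEC_ENGINE_HW_VIRT
 *   - HM enabled but unusable, FallbackToNEM set   -> NEM via NEMR3Init()
 *   - NEM also unavailable and HMForced set        -> fail with VMSetError()
 *   - NEM also unavailable, HMForced clear         -> VM_EXEC_ENGINE_RAW_MODE
 *   - HM disabled: UseNEMInstead ? NEM : VM_EXEC_ENGINE_RAW_MODE
 */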
625
626/**
627 * Initializes HM components after ring-3 phase has been fully initialized.
628 *
629 * @returns VBox status code.
630 * @param pVM The cross context VM structure.
631 */
632static int hmR3InitFinalizeR3(PVM pVM)
633{
634 LogFlowFunc(("\n"));
635
636 if (!HMIsEnabled(pVM))
637 return VINF_SUCCESS;
638
639 for (VMCPUID i = 0; i < pVM->cCpus; i++)
640 {
641 PVMCPU pVCpu = &pVM->aCpus[i];
642 pVCpu->hm.s.fActive = false;
643 pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu); /* Is safe to call now since GIMR3Init() has completed. */
644 }
645
646#ifdef VBOX_WITH_STATISTICS
647 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
648 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
649 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessCr8, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessCR8", STAMUNIT_OCCURENCES, "Number of instruction replacements by MOV CR8.");
650 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccessVmc, STAMTYPE_COUNTER, "/HM/TPR/Replace/SuccessVMC", STAMUNIT_OCCURENCES, "Number of instruction replacements by VMMCALL.");
651 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
652#endif
653
654 /*
655 * Statistics.
656 */
657 for (VMCPUID i = 0; i < pVM->cCpus; i++)
658 {
659 PVMCPU pVCpu = &pVM->aCpus[i];
660 int rc;
661
662#ifdef VBOX_WITH_STATISTICS
663 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
664 "Profiling of RTMpPokeCpu.",
665 "/PROF/CPU%d/HM/Poke", i);
666 AssertRC(rc);
667 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
668 "Profiling of poke wait.",
669 "/PROF/CPU%d/HM/PokeWait", i);
670 AssertRC(rc);
671 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
672 "Profiling of poke wait when RTMpPokeCpu fails.",
673 "/PROF/CPU%d/HM/PokeWaitFailed", i);
674 AssertRC(rc);
675 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
676 "Profiling of entry until entering GC.",
677 "/PROF/CPU%d/HM/Entry", i);
678 AssertRC(rc);
679 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPreExit, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
680 "Profiling of pre-exit processing after returning from GC.",
681 "/PROF/CPU%d/HM/SwitchFromGC_1", i);
682 AssertRC(rc);
683 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitHandling, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
684 "Profiling of exit handling (longjmps not included!)",
685 "/PROF/CPU%d/HM/SwitchFromGC_2", i);
686 AssertRC(rc);
687
688 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitIO, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
689 "I/O.",
690 "/PROF/CPU%d/HM/SwitchFromGC_2/IO", i);
691 AssertRC(rc);
692 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitMovCRx, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
693 "MOV CRx.",
694 "/PROF/CPU%d/HM/SwitchFromGC_2/MovCRx", i);
695 AssertRC(rc);
696 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitXcptNmi, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
697 "Exceptions, NMIs.",
698 "/PROF/CPU%d/HM/SwitchFromGC_2/XcptNmi", i);
699 AssertRC(rc);
700 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitVmentry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
701 "VMLAUNCH/VMRESUME on Intel or VMRUN on AMD.",
702 "/PROF/CPU%d/HM/SwitchFromGC_2/Vmentry", i);
703 AssertRC(rc);
704 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatImportGuestState, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
705 "Profiling of importing guest state from hardware after VM-exit.",
706 "/PROF/CPU%d/HM/ImportGuestState", i);
707 AssertRC(rc);
708 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExportGuestState, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
709 "Profiling of exporting guest state to hardware before VM-entry.",
710 "/PROF/CPU%d/HM/ExportGuestState", i);
711 AssertRC(rc);
712 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatLoadGuestFpuState, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
713 "Profiling of CPUMR0LoadGuestFPU.",
714 "/PROF/CPU%d/HM/LoadGuestFpuState", i);
715 AssertRC(rc);
716 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
717 "Profiling of execution of guest-code in hardware.",
718 "/PROF/CPU%d/HM/InGC", i);
719 AssertRC(rc);
720
721# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
722 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED,
723 STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher.",
724 "/PROF/CPU%d/HM/Switcher3264", i);
725 AssertRC(rc);
726# endif
727
728# ifdef HM_PROFILE_EXIT_DISPATCH
729 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED,
730 STAMUNIT_TICKS_PER_CALL, "Profiling the dispatching of exit handlers.",
731 "/PROF/CPU%d/HM/ExitDispatch", i);
732 AssertRC(rc);
733# endif
734
735#endif
736# define HM_REG_COUNTER(a, b, desc) \
737 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, desc, b, i); \
738 AssertRC(rc);
739
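/*
 * Editorial example: for the first counter registered below,
 *   HM_REG_COUNTER(&pVCpu->hm.s.StatExitAll, "/HM/CPU%d/Exit/All", "Total exits (including nested-guest exits).");
 * expands to
 *   rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitAll, STAMTYPE_COUNTER,
 *                        STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
 *                        "Total exits (including nested-guest exits).",
 *                        "/HM/CPU%d/Exit/All", i);
 *   AssertRC(rc);
 * i.e. the per-vCPU index 'i' is substituted for the "%d" in the STAM path.
 */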
740#ifdef VBOX_WITH_STATISTICS
741 HM_REG_COUNTER(&pVCpu->hm.s.StatExitAll, "/HM/CPU%d/Exit/All", "Total exits (including nested-guest exits).");
742 HM_REG_COUNTER(&pVCpu->hm.s.StatNestedExitAll, "/HM/CPU%d/Exit/NestedGuest/All", "Total nested-guest exits.");
743 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowNM, "/HM/CPU%d/Exit/Trap/Shw/#NM", "Shadow #NM (device not available, no math co-processor) exception.");
744 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNM, "/HM/CPU%d/Exit/Trap/Gst/#NM", "Guest #NM (device not available, no math co-processor) exception.");
745 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPF, "/HM/CPU%d/Exit/Trap/Shw/#PF", "Shadow #PF (page fault) exception.");
746 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPFEM, "/HM/CPU%d/Exit/Trap/Shw/#PF-EM", "#PF (page fault) exception going back to ring-3 for emulating the instruction.");
747 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestPF, "/HM/CPU%d/Exit/Trap/Gst/#PF", "Guest #PF (page fault) exception.");
748 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestUD, "/HM/CPU%d/Exit/Trap/Gst/#UD", "Guest #UD (undefined opcode) exception.");
749 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestSS, "/HM/CPU%d/Exit/Trap/Gst/#SS", "Guest #SS (stack-segment fault) exception.");
750 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNP, "/HM/CPU%d/Exit/Trap/Gst/#NP", "Guest #NP (segment not present) exception.");
751 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestGP, "/HM/CPU%d/Exit/Trap/Gst/#GP", "Guest #GP (general protection) exception.");
752 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestMF, "/HM/CPU%d/Exit/Trap/Gst/#MF", "Guest #MF (x87 FPU error, math fault) exception.");
753 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDE, "/HM/CPU%d/Exit/Trap/Gst/#DE", "Guest #DE (divide error) exception.");
754 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDB, "/HM/CPU%d/Exit/Trap/Gst/#DB", "Guest #DB (debug) exception.");
755 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestBP, "/HM/CPU%d/Exit/Trap/Gst/#BP", "Guest #BP (breakpoint) exception.");
756 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
757 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions.");
758 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr", "MSR read.");
759 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr", "MSR write.");
760 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR-Write", "Debug register write.");
761 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR-Read", "Debug register read.");
762 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR0", "CR0 read.");
763 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR2", "CR2 read.");
764 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR3", "CR3 read.");
765 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR4", "CR4 read.");
766 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Read, "/HM/CPU%d/Exit/Instr/CR-Read/CR8", "CR8 read.");
767 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR0Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR0", "CR0 write.");
768 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR2Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR2", "CR2 write.");
769 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR3Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR3", "CR3 write.");
770 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR4Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR4", "CR4 write.");
771 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCR8Write, "/HM/CPU%d/Exit/Instr/CR-Write/CR8", "CR8 write.");
772 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts, "/HM/CPU%d/Exit/Instr/CLTS", "CLTS instruction.");
773 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw, "/HM/CPU%d/Exit/Instr/LMSW", "LMSW instruction.");
774 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess, "/HM/CPU%d/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
775 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/Instr/IO/Write", "I/O write.");
776 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/Instr/IO/Read", "I/O read.");
777 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/Instr/IO/WriteString", "String I/O write.");
778 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/Instr/IO/ReadString", "String I/O read.");
779 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow, "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
780 HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt, "/HM/CPU%d/Exit/ExtInt", "Physical maskable interrupt (host).");
781#endif
782 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHostNmiInGC, "/HM/CPU%d/Exit/HostNmiInGC", "Host NMI received while in guest context.");
783#ifdef VBOX_WITH_STATISTICS
784 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptTimer, "/HM/CPU%d/Exit/PreemptTimer", "VMX-preemption timer expired.");
785 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold, "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
786 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch, "/HM/CPU%d/Exit/TaskSwitch", "Task switch.");
787 HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess, "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
788
789 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchTprMaskedIrq, "/HM/CPU%d/Switch/TprMaskedIrq", "PDMGetInterrupt() signals TPR masks pending Irq.");
790 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchGuestIrq, "/HM/CPU%d/Switch/IrqPending", "PDMGetInterrupt() cleared behind our back!?!.");
791 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchPendingHostIrq, "/HM/CPU%d/Switch/PendingHostIrq", "Exit to ring-3 due to pending host interrupt before executing guest code.");
792 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchHmToR3FF, "/HM/CPU%d/Switch/HmToR3FF", "Exit to ring-3 due to pending timers, EMT rendezvous, critical section etc.");
793 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchVmReq, "/HM/CPU%d/Switch/VmReq", "Exit to ring-3 due to pending VM requests.");
794 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchPgmPoolFlush, "/HM/CPU%d/Switch/PgmPoolFlush", "Exit to ring-3 due to pending PGM pool flush.");
795 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchDma, "/HM/CPU%d/Switch/PendingDma", "Exit to ring-3 due to pending DMA requests.");
796 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchExitToR3, "/HM/CPU%d/Switch/ExitToR3", "Exit to ring-3 (total).");
797 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchLongJmpToR3, "/HM/CPU%d/Switch/LongJmpToR3", "Longjump to ring-3.");
798 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchMaxResumeLoops, "/HM/CPU%d/Switch/MaxResumeLoops", "Maximum VMRESUME inner-loop counter reached.");
799 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchHltToR3, "/HM/CPU%d/Switch/HltToR3", "HLT causing us to go to ring-3.");
800 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchApicAccessToR3, "/HM/CPU%d/Switch/ApicAccessToR3", "APIC access causing us to go to ring-3.");
801#endif
802 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchPreempt, "/HM/CPU%d/Switch/Preempting", "EMT has been preempted while in HM context.");
803#ifdef VBOX_WITH_STATISTICS
804 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchNstGstVmexit, "/HM/CPU%d/Switch/NstGstVmexit", "Nested-guest VM-exit occurred.");
805
806 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectInterrupt, "/HM/CPU%d/EventInject/Interrupt", "Injected an external interrupt into the guest.");
807 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectXcpt, "/HM/CPU%d/EventInject/Trap", "Injected an exception into the guest.");
808 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectPendingReflect, "/HM/CPU%d/EventInject/PendingReflect", "Reflecting an exception (or #DF) caused due to event injection.");
809 HM_REG_COUNTER(&pVCpu->hm.s.StatInjectPendingInterpret, "/HM/CPU%d/EventInject/PendingInterpret", "Falling to interpreter for handling exception caused due to event injection.");
810
811 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPage, "/HM/CPU%d/Flush/Page", "Invalidating a guest page on all guest CPUs.");
812 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageManual, "/HM/CPU%d/Flush/Page/Virt", "Invalidating a guest page using guest-virtual address.");
813 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPhysPageManual, "/HM/CPU%d/Flush/Page/Phys", "Invalidating a guest page using guest-physical address.");
814 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlb, "/HM/CPU%d/Flush/TLB", "Forcing a full guest-TLB flush (ring-0).");
815 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbManual, "/HM/CPU%d/Flush/TLB/Manual", "Request a full guest-TLB flush.");
816 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/CpuSwitch", "Forcing a full guest-TLB flush due to host-CPU reschedule or ASID-limit hit by another guest-VCPU.");
817 HM_REG_COUNTER(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch, "/HM/CPU%d/Flush/TLB/Skipped", "No TLB flushing required.");
818 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushEntire, "/HM/CPU%d/Flush/TLB/Entire", "Flush the entire TLB (host + guest).");
819 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushAsid, "/HM/CPU%d/Flush/TLB/ASID", "Flushed guest-TLB entries for the current VPID.");
820 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushNestedPaging, "/HM/CPU%d/Flush/TLB/NestedPaging", "Flushed guest-TLB entries for the current EPT.");
821 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpgVirt, "/HM/CPU%d/Flush/TLB/InvlpgVirt", "Invalidated a guest-TLB entry for a guest-virtual address.");
822 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTlbInvlpgPhys, "/HM/CPU%d/Flush/TLB/InvlpgPhys", "Currently not possible, flushes entire guest-TLB.");
823 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdown, "/HM/CPU%d/Flush/Shootdown/Page", "Inter-VCPU request to flush queued guest page.");
824 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB", "Inter-VCPU request to flush entire guest-TLB.");
825
826 HM_REG_COUNTER(&pVCpu->hm.s.StatTscParavirt, "/HM/CPU%d/TSC/Paravirt", "Paravirtualized TSC in effect.");
827 HM_REG_COUNTER(&pVCpu->hm.s.StatTscOffset, "/HM/CPU%d/TSC/Offset", "TSC offsetting is in effect.");
828 HM_REG_COUNTER(&pVCpu->hm.s.StatTscIntercept, "/HM/CPU%d/TSC/Intercept", "Intercept TSC accesses.");
829
830 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed, "/HM/CPU%d/Debug/Armed", "Loaded guest-debug state while loading guest-state.");
831 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxContextSwitch, "/HM/CPU%d/Debug/ContextSwitch", "Loaded guest-debug state on MOV DRx.");
832 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxIoCheck, "/HM/CPU%d/Debug/IOCheck", "Checking for I/O breakpoint.");
833
834 HM_REG_COUNTER(&pVCpu->hm.s.StatExportMinimal, "/HM/CPU%d/Export/Minimal", "VM-entry exporting minimal guest-state.");
835 HM_REG_COUNTER(&pVCpu->hm.s.StatExportFull, "/HM/CPU%d/Export/Full", "VM-entry exporting the full guest-state.");
836 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadGuestFpu, "/HM/CPU%d/Export/GuestFpu", "VM-entry loading the guest-FPU state.");
837 HM_REG_COUNTER(&pVCpu->hm.s.StatExportHostState, "/HM/CPU%d/Export/HostState", "VM-entry exporting host-state.");
838
839 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelBase, "/HM/CPU%d/VMXCheck/RMSelBase", "Could not use VMX due to unsuitable real-mode selector base.");
840 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit, "/HM/CPU%d/VMXCheck/RMSelLimit", "Could not use VMX due to unsuitable real-mode selector limit.");
841 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRmSelAttr, "/HM/CPU%d/VMXCheck/RMSelAttrs", "Could not use VMX due to unsuitable real-mode selector attributes.");
842
843 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadV86SelBase, "/HM/CPU%d/VMXCheck/V86SelBase", "Could not use VMX due to unsuitable v8086-mode selector base.");
844 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadV86SelLimit, "/HM/CPU%d/VMXCheck/V86SelLimit", "Could not use VMX due to unsuitable v8086-mode selector limit.");
845 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadV86SelAttr, "/HM/CPU%d/VMXCheck/V86SelAttrs", "Could not use VMX due to unsuitable v8086-mode selector attributes.");
846
847 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckRmOk, "/HM/CPU%d/VMXCheck/VMX_RM", "VMX execution in real (V86) mode OK.");
848 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadSel, "/HM/CPU%d/VMXCheck/Selector", "Could not use VMX due to unsuitable selector.");
849 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckBadRpl, "/HM/CPU%d/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
850 HM_REG_COUNTER(&pVCpu->hm.s.StatVmxCheckPmOk, "/HM/CPU%d/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
851
852#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
853 HM_REG_COUNTER(&pVCpu->hm.s.StatFpu64SwitchBack, "/HM/CPU%d/Switch64/Fpu", "Saving guest FPU/XMM state.");
854 HM_REG_COUNTER(&pVCpu->hm.s.StatDebug64SwitchBack, "/HM/CPU%d/Switch64/Debug", "Saving guest debug state.");
855#endif
856
857#undef HM_REG_COUNTER
858
859 bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
860
861 /*
862 * Guest Exit reason stats.
863 */
864 pVCpu->hm.s.paStatExitReason = NULL;
865 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT * sizeof(*pVCpu->hm.s.paStatExitReason), 0 /* uAlignment */, MM_TAG_HM,
866 (void **)&pVCpu->hm.s.paStatExitReason);
867 AssertRCReturn(rc, rc);
868
869 if (fCpuSupportsVmx)
870 {
871 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
872 {
873 const char *pszExitName = HMGetVmxExitName(j);
874 if (pszExitName)
875 {
876 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
877 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/Exit/Reason/%02x", i, j);
878 AssertRCReturn(rc, rc);
879 }
880 }
881 }
882 else
883 {
884 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
885 {
886 const char *pszExitName = HMGetSvmExitName(j);
887 if (pszExitName)
888 {
889 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
890 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/Exit/Reason/%02x", i, j);
891 AssertRCReturn(rc, rc);
892 }
893 }
894 }
895 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNpf, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
896 "Nested page fault", "/HM/CPU%d/Exit/Reason/#NPF", i);
897 AssertRCReturn(rc, rc);
898 pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason);
899# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
900 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
901# else
902 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR);
903# endif
904
905#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
906 /*
907 * Nested-guest VM-exit reason stats.
908 */
909 pVCpu->hm.s.paStatNestedExitReason = NULL;
910 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT * sizeof(*pVCpu->hm.s.paStatNestedExitReason), 0 /* uAlignment */, MM_TAG_HM,
911 (void **)&pVCpu->hm.s.paStatNestedExitReason);
912 AssertRCReturn(rc, rc);
913 if (fCpuSupportsVmx)
914 {
915 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
916 {
917 const char *pszExitName = HMGetVmxExitName(j);
918 if (pszExitName)
919 {
920 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
921 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/Exit/NestedGuest/Reason/%02x", i, j);
922 AssertRC(rc);
923 }
924 }
925 }
926 else
927 {
928 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
929 {
930 const char *pszExitName = HMGetSvmExitName(j);
931 if (pszExitName)
932 {
933 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatNestedExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
934 STAMUNIT_OCCURENCES, pszExitName, "/HM/CPU%d/Exit/NestedGuest/Reason/%02x", i, j);
935 AssertRC(rc);
936 }
937 }
938 }
939 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatNestedExitReasonNpf, STAMTYPE_COUNTER, STAMVISIBILITY_USED,
940 STAMUNIT_OCCURENCES, "Nested page fault", "/HM/CPU%d/Exit/NestedGuest/Reason/#NPF", i);
941 AssertRCReturn(rc, rc);
942 pVCpu->hm.s.paStatNestedExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatNestedExitReason);
943# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
944 Assert(pVCpu->hm.s.paStatNestedExitReasonR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
945# else
946 Assert(pVCpu->hm.s.paStatNestedExitReasonR0 != NIL_RTR0PTR);
947# endif
948#endif
949
950 /*
951 * Injected events stats.
952 */
953 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs);
954 AssertRCReturn(rc, rc);
955 pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs);
956# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
957 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
958# else
959 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
960# endif
961 for (unsigned j = 0; j < 255; j++)
962 {
963 STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
964 "Injected event.",
965 (j < 0x20) ? "/HM/CPU%d/EventInject/InjectTrap/%02X" : "/HM/CPU%d/EventInject/InjectIRQ/%02X", i, j);
966 }
967
968#endif /* VBOX_WITH_STATISTICS */
969 }
970
971#ifdef VBOX_WITH_CRASHDUMP_MAGIC
972 /*
973 * Magic marker for searching in crash dumps.
974 */
975 for (VMCPUID i = 0; i < pVM->cCpus; i++)
976 {
977 PVMCPU pVCpu = &pVM->aCpus[i];
978
979 PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache;
980 strcpy((char *)pVmcsCache->aMagic, "VMCSCACHE Magic");
981 pVmcsCache->uMagic = UINT64_C(0xdeadbeefdeadbeef);
982 }
983#endif
984
985 return VINF_SUCCESS;
986}
987
988
989/**
990 * Called when a init phase has completed.
991 *
992 * @returns VBox status code.
993 * @param pVM The cross context VM structure.
994 * @param enmWhat The phase that completed.
995 */
996VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
997{
998 switch (enmWhat)
999 {
1000 case VMINITCOMPLETED_RING3:
1001 return hmR3InitFinalizeR3(pVM);
1002 case VMINITCOMPLETED_RING0:
1003 return hmR3InitFinalizeR0(pVM);
1004 default:
1005 return VINF_SUCCESS;
1006 }
1007}
1008
1009
1010/**
1011 * Turns off normal raw mode features.
1012 *
1013 * @param pVM The cross context VM structure.
1014 */
1015static void hmR3DisableRawMode(PVM pVM)
1016{
1017/** @todo r=bird: HM shouldn't be doing this crap. */
1018 /* Reinit the paging mode to force the new shadow mode. */
1019 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1020 {
1021 PVMCPU pVCpu = &pVM->aCpus[i];
1022 PGMHCChangeMode(pVM, pVCpu, PGMMODE_REAL);
1023 }
1024}
1025
1026
1027/**
1028 * Initialize VT-x or AMD-V.
1029 *
1030 * @returns VBox status code.
1031 * @param pVM The cross context VM structure.
1032 */
1033static int hmR3InitFinalizeR0(PVM pVM)
1034{
1035 int rc;
1036
1037 if (!HMIsEnabled(pVM))
1038 return VINF_SUCCESS;
1039
1040 /*
1041 * Hack to allow users to work around broken BIOSes that incorrectly set
1042 * EFER.SVME, which makes us believe somebody else is already using AMD-V.
1043 */
1044 if ( !pVM->hm.s.vmx.fSupported
1045 && !pVM->hm.s.svm.fSupported
1046 && pVM->hm.s.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
1047 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
1048 {
1049 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
1050 pVM->hm.s.svm.fSupported = true;
1051 pVM->hm.s.svm.fIgnoreInUseError = true;
1052 pVM->hm.s.rcInit = VINF_SUCCESS;
1053 }
1054
1055 /*
1056 * Report ring-0 init errors.
1057 */
1058 if ( !pVM->hm.s.vmx.fSupported
1059 && !pVM->hm.s.svm.fSupported)
1060 {
1061 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit));
1062 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatCtrl));
1063 switch (pVM->hm.s.rcInit)
1064 {
1065 case VERR_VMX_IN_VMX_ROOT_MODE:
1066 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor");
1067 case VERR_VMX_NO_VMX:
1068 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available");
1069 case VERR_VMX_MSR_VMX_DISABLED:
1070 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_DISABLED, "VT-x is disabled in the BIOS");
1071 case VERR_VMX_MSR_ALL_VMX_DISABLED:
1072 return VM_SET_ERROR(pVM, VERR_VMX_MSR_ALL_VMX_DISABLED, "VT-x is disabled in the BIOS for all CPU modes");
1073 case VERR_VMX_MSR_LOCKING_FAILED:
1074 return VM_SET_ERROR(pVM, VERR_VMX_MSR_LOCKING_FAILED, "Failed to lock VT-x features while trying to enable VT-x");
1075 case VERR_VMX_MSR_VMX_ENABLE_FAILED:
1076 return VM_SET_ERROR(pVM, VERR_VMX_MSR_VMX_ENABLE_FAILED, "Failed to enable VT-x features");
1077 case VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED:
1078 return VM_SET_ERROR(pVM, VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED, "Failed to enable VT-x features in SMX mode");
1079
1080 case VERR_SVM_IN_USE:
1081 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor");
1082 case VERR_SVM_NO_SVM:
1083 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available");
1084 case VERR_SVM_DISABLED:
1085 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
1086 }
1087 return VMSetError(pVM, pVM->hm.s.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.rcInit);
1088 }
1089
1090 /*
1091 * Enable VT-x or AMD-V on all host CPUs.
1092 */
1093 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
1094 if (RT_FAILURE(rc))
1095 {
1096 LogRel(("HM: Failed to enable, error %Rrc\n", rc));
1097 HMR3CheckError(pVM, rc);
1098 return rc;
1099 }
1100
1101 /*
1102 * No TPR patching is required when the IO-APIC is not enabled for this VM.
1103 * (Main should have taken care of this already)
1104 */
1105 if (!PDMHasIoApic(pVM))
1106 {
1107 Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
1108 pVM->hm.s.fTprPatchingAllowed = false;
1109 }
1110
1111 /*
1112 * Check if L1D flush is needed/possible.
1113 */
1114 if ( !pVM->cpum.ro.HostFeatures.fFlushCmd
1115 || pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
1116 || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
1117 || pVM->cpum.ro.HostFeatures.fArchVmmNeedNotFlushL1d
1118 || pVM->cpum.ro.HostFeatures.fArchRdclNo)
1119 pVM->hm.s.fL1dFlushOnSched = pVM->hm.s.fL1dFlushOnVmEntry = false;
1120
1121 /*
1122 * Check if MDS flush is needed/possible.
1123 * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
1124 */
1125 if ( !pVM->cpum.ro.HostFeatures.fMdsClear
1126 || pVM->cpum.ro.HostFeatures.fArchMdsNo)
1127 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
1128 else if ( ( pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
1129 && pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Atom_End)
1130 || ( pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
1131 && pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Phi_End))
1132 {
1133 if (!pVM->hm.s.fMdsClearOnSched)
1134 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
1135 pVM->hm.s.fMdsClearOnVmEntry = false;
1136 }
1137 else if ( pVM->cpum.ro.HostFeatures.enmMicroarch < kCpumMicroarch_Intel_Core7_Nehalem
1138 || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
1139 pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
1140
1141 /*
1142 * Sync options.
1143 */
1144 /** @todo Move this out of of CPUMCTX and into some ring-0 only HM structure.
1145 * That will require a little bit of work, of course. */
1146 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1147 {
1148 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1149 PCPUMCTX pCpuCtx = &pVCpu->cpum.GstCtx;
1150 pCpuCtx->fWorldSwitcher &= ~(CPUMCTX_WSF_IBPB_EXIT | CPUMCTX_WSF_IBPB_ENTRY);
1151 if (pVM->cpum.ro.HostFeatures.fIbpb)
1152 {
1153 if (pVM->hm.s.fIbpbOnVmExit)
1154 pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_EXIT;
1155 if (pVM->hm.s.fIbpbOnVmEntry)
1156 pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_ENTRY;
1157 }
1158 if (pVM->cpum.ro.HostFeatures.fFlushCmd && pVM->hm.s.fL1dFlushOnVmEntry)
1159 pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_L1D_ENTRY;
1160 if (pVM->cpum.ro.HostFeatures.fMdsClear && pVM->hm.s.fMdsClearOnVmEntry)
1161 pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_MDS_ENTRY;
1162 if (iCpu == 0)
1163 LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
1164 pCpuCtx->fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
1165 pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
1166 }
1167
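/*
 * Editorial note: after the loop above, each vCPU's fWorldSwitcher carries
 * one CPUMCTX_WSF_* bit per active mitigation (e.g. CPUMCTX_WSF_IBPB_ENTRY
 * for an IBPB on every VM-entry); the ring-0 world-switch code consults
 * these flags.
 */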
1168 /*
1169 * Do the vendor-specific initialization.
1170 *
1171 * Note! We disable release log buffering here since we're doing a
1172 * relatively large amount of logging and don't want to hit the disk with
1173 * each LogRel statement.
1174 */
1175 AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
1176 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
1177 if (pVM->hm.s.vmx.fSupported)
1178 rc = hmR3InitFinalizeR0Intel(pVM);
1179 else
1180 rc = hmR3InitFinalizeR0Amd(pVM);
1181 LogRel((pVM->hm.s.fGlobalInit ? "HM: VT-x/AMD-V init method: Global\n"
1182 : "HM: VT-x/AMD-V init method: Local\n"));
1183 RTLogRelSetBuffering(fOldBuffered);
1184 pVM->hm.s.fInitialized = true;
1185
1186 return rc;
1187}
1188
1189
1190/**
1191 * @callback_method_impl{FNPDMVMMDEVHEAPNOTIFY}
1192 */
1193static DECLCALLBACK(void) hmR3VmmDevHeapNotify(PVM pVM, void *pvAllocation, RTGCPHYS GCPhysAllocation)
1194{
1195 NOREF(pVM);
1196 NOREF(pvAllocation);
1197 NOREF(GCPhysAllocation);
1198}
1199
1200
1201/**
1202 * Returns a description of the VMCS (and associated regions') memory type given the
1203 * IA32_VMX_BASIC MSR.
1204 *
1205 * @returns The descriptive memory type.
1206 * @param uMsrVmxBasic IA32_VMX_BASIC MSR value.
1207 */
1208static const char *hmR3VmxGetMemTypeDesc(uint64_t uMsrVmxBasic)
1209{
1210 uint8_t const uMemType = RT_BF_GET(uMsrVmxBasic, VMX_BF_BASIC_VMCS_MEM_TYPE);
1211 switch (uMemType)
1212 {
1213 case VMX_BASIC_MEM_TYPE_WB: return "Write Back (WB)";
1214 case VMX_BASIC_MEM_TYPE_UC: return "Uncacheable (UC)";
1215 }
1216 return "Unknown";
1217}
1218
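/*
 * Editorial note: RT_BF_GET() extracts a named bit-field from a 64-bit
 * value; VMX_BF_BASIC_VMCS_MEM_TYPE above covers bits 53:50 of
 * IA32_VMX_BASIC (an assumption based on the Intel SDM layout).
 */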
1219
1220/**
1221 * Returns a single-line description of all the activity-states supported by the CPU
1222 * given the IA32_VMX_MISC MSR.
1223 *
1224 * @returns All supported activity states.
1225 * @param uMsrMisc IA32_VMX_MISC MSR value.
1226 */
1227static const char *hmR3VmxGetActivityStateAllDesc(uint64_t uMsrMisc)
1228{
1229 static const char * const s_apszActStates[] =
1230 {
1231 "",
1232 " ( HLT )",
1233 " ( SHUTDOWN )",
1234 " ( HLT SHUTDOWN )",
1235 " ( SIPI_WAIT )",
1236 " ( HLT SIPI_WAIT )",
1237 " ( SHUTDOWN SIPI_WAIT )",
1238 " ( HLT SHUTDOWN SIPI_WAIT )"
1239 };
1240 uint8_t const idxActStates = RT_BF_GET(uMsrMisc, VMX_BF_MISC_ACTIVITY_STATES);
1241 Assert(idxActStates < RT_ELEMENTS(s_apszActStates));
1242 return s_apszActStates[idxActStates];
1243}
1244
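/*
 * Editorial note: the table above is ordered so the 3-bit activity-state
 * field indexes it directly: bit 0 = HLT, bit 1 = SHUTDOWN, bit 2 =
 * SIPI_WAIT, matching the IA32_VMX_MISC layout.
 */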
1245
1246/**
1247 * Reports MSR_IA32_FEATURE_CONTROL MSR to the log.
1248 *
1249 * @param fFeatMsr The feature control MSR value.
1250 */
1251static void hmR3VmxReportFeatCtlMsr(uint64_t fFeatMsr)
1252{
1253 uint64_t const val = fFeatMsr;
1254 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", val));
1255 HMVMX_REPORT_MSR_CAP(val, "LOCK", MSR_IA32_FEATURE_CONTROL_LOCK);
1256 HMVMX_REPORT_MSR_CAP(val, "SMX_VMXON", MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
1257 HMVMX_REPORT_MSR_CAP(val, "VMXON", MSR_IA32_FEATURE_CONTROL_VMXON);
1258 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN0", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_0);
1259 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN1", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_1);
1260 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN2", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_2);
1261 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN3", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_3);
1262 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN4", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_4);
1263 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN5", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_5);
1264 HMVMX_REPORT_MSR_CAP(val, "SENTER_LOCAL_FN6", MSR_IA32_FEATURE_CONTROL_SENTER_LOCAL_FN_6);
1265 HMVMX_REPORT_MSR_CAP(val, "SENTER_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SENTER_GLOBAL_EN);
1266 HMVMX_REPORT_MSR_CAP(val, "SGX_LAUNCH_EN", MSR_IA32_FEATURE_CONTROL_SGX_LAUNCH_EN);
1267 HMVMX_REPORT_MSR_CAP(val, "SGX_GLOBAL_EN", MSR_IA32_FEATURE_CONTROL_SGX_GLOBAL_EN);
1268 HMVMX_REPORT_MSR_CAP(val, "LMCE", MSR_IA32_FEATURE_CONTROL_LMCE);
1269 if (!(val & MSR_IA32_FEATURE_CONTROL_LOCK))
1270 LogRel(("HM: MSR_IA32_FEATURE_CONTROL lock bit not set, possibly bad hardware!\n"));
1271}
1272
1273
1274/**
1275 * Reports MSR_IA32_VMX_BASIC MSR to the log.
1276 *
1277 * @param uBasicMsr The VMX basic MSR value.
1278 */
1279static void hmR3VmxReportBasicMsr(uint64_t uBasicMsr)
1280{
1281 LogRel(("HM: MSR_IA32_VMX_BASIC = %#RX64\n", uBasicMsr));
1282 LogRel(("HM: VMCS id = %#x\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_ID)));
1283 LogRel(("HM: VMCS size = %u bytes\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_SIZE)));
1284 LogRel(("HM: VMCS physical address limit = %s\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_PHYSADDR_WIDTH) ?
1285 "< 4 GB" : "None"));
1286 LogRel(("HM: VMCS memory type = %s\n", hmR3VmxGetMemTypeDesc(uBasicMsr)));
1287 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_DUAL_MON)));
1288 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_VMCS_INS_OUTS)));
1289 LogRel(("HM: Supports true capability MSRs = %RTbool\n", RT_BF_GET(uBasicMsr, VMX_BF_BASIC_TRUE_CTLS)));
1290}
1291
1292
1293/**
1294 * Reports MSR_IA32_PINBASED_CTLS to the log.
1295 *
1296 * @param pVmxMsr Pointer to the VMX MSR.
1297 */
1298static void hmR3VmxReportPinBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1299{
1300 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1301 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
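 /* A control bit may be set only if its allowed-1 bit is 1, and must be set if its
 allowed-0 bit is 1 (reported as "must be set" by HMVMX_REPORT_FEAT). */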
1302 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVmxMsr->u));
1303 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EXT_INT_EXIT", VMX_PIN_CTLS_EXT_INT_EXIT);
1304 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_EXIT", VMX_PIN_CTLS_NMI_EXIT);
1305 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRTUAL_NMI", VMX_PIN_CTLS_VIRT_NMI);
1306 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PREEMPT_TIMER", VMX_PIN_CTLS_PREEMPT_TIMER);
1307 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "POSTED_INT", VMX_PIN_CTLS_POSTED_INT);
1308}
1309
1310
1311/**
1312 * Reports MSR_IA32_VMX_PROCBASED_CTLS MSR to the log.
1313 *
1314 * @param pVmxMsr Pointer to the VMX MSR.
1315 */
1316static void hmR3VmxReportProcBasedCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1317{
1318 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1319 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1320 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVmxMsr->u));
1321 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INT_WINDOW_EXIT", VMX_PROC_CTLS_INT_WINDOW_EXIT);
1322 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TSC_OFFSETTING", VMX_PROC_CTLS_USE_TSC_OFFSETTING);
1323 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HLT_EXIT", VMX_PROC_CTLS_HLT_EXIT);
1324 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVLPG_EXIT", VMX_PROC_CTLS_INVLPG_EXIT);
1325 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MWAIT_EXIT", VMX_PROC_CTLS_MWAIT_EXIT);
1326 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDPMC_EXIT", VMX_PROC_CTLS_RDPMC_EXIT);
1327 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSC_EXIT", VMX_PROC_CTLS_RDTSC_EXIT);
1328 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_LOAD_EXIT", VMX_PROC_CTLS_CR3_LOAD_EXIT);
1329 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR3_STORE_EXIT", VMX_PROC_CTLS_CR3_STORE_EXIT);
1330 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_LOAD_EXIT", VMX_PROC_CTLS_CR8_LOAD_EXIT);
1331 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CR8_STORE_EXIT", VMX_PROC_CTLS_CR8_STORE_EXIT);
1332 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_TPR_SHADOW", VMX_PROC_CTLS_USE_TPR_SHADOW);
1333 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "NMI_WINDOW_EXIT", VMX_PROC_CTLS_NMI_WINDOW_EXIT);
1334 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MOV_DR_EXIT", VMX_PROC_CTLS_MOV_DR_EXIT);
1335 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNCOND_IO_EXIT", VMX_PROC_CTLS_UNCOND_IO_EXIT);
1336 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_IO_BITMAPS", VMX_PROC_CTLS_USE_IO_BITMAPS);
1337 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_TRAP_FLAG", VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
1338 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_MSR_BITMAPS", VMX_PROC_CTLS_USE_MSR_BITMAPS);
1339 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MONITOR_EXIT", VMX_PROC_CTLS_MONITOR_EXIT);
1340 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_EXIT", VMX_PROC_CTLS_PAUSE_EXIT);
1341 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USE_SECONDARY_CTLS", VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1342}
1343
1344
1345/**
1346 * Reports MSR_IA32_VMX_PROCBASED_CTLS2 MSR to the log.
1347 *
1348 * @param pVmxMsr Pointer to the VMX MSR.
1349 */
1350static void hmR3VmxReportProcBasedCtls2Msr(PCVMXCTLSMSR pVmxMsr)
1351{
1352 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1353 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1354 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVmxMsr->u));
1355 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_APIC_ACCESS", VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
1356 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT", VMX_PROC_CTLS2_EPT);
1357 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DESC_TABLE_EXIT", VMX_PROC_CTLS2_DESC_TABLE_EXIT);
1358 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDTSCP", VMX_PROC_CTLS2_RDTSCP);
1359 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_X2APIC_MODE", VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
1360 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VPID", VMX_PROC_CTLS2_VPID);
1361 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "WBINVD_EXIT", VMX_PROC_CTLS2_WBINVD_EXIT);
1362 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "UNRESTRICTED_GUEST", VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
1363 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "APIC_REG_VIRT", VMX_PROC_CTLS2_APIC_REG_VIRT);
1364 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VIRT_INT_DELIVERY", VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
1365 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PAUSE_LOOP_EXIT", VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
1366 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDRAND_EXIT", VMX_PROC_CTLS2_RDRAND_EXIT);
1367 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "INVPCID", VMX_PROC_CTLS2_INVPCID);
1368 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMFUNC", VMX_PROC_CTLS2_VMFUNC);
1369 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "VMCS_SHADOWING", VMX_PROC_CTLS2_VMCS_SHADOWING);
1370 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLS_EXIT", VMX_PROC_CTLS2_ENCLS_EXIT);
1371 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "RDSEED_EXIT", VMX_PROC_CTLS2_RDSEED_EXIT);
1372 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PML", VMX_PROC_CTLS2_PML);
1373 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "EPT_VE", VMX_PROC_CTLS2_EPT_VE);
1374 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
1375 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "XSAVES_XRSTORS", VMX_PROC_CTLS2_XSAVES_XRSTORS);
1376 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "MODE_BASED_EPT_PERM", VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
1377 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SPPTP_EPT", VMX_PROC_CTLS2_SPPTP_EPT);
1378 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "PT_EPT", VMX_PROC_CTLS2_PT_EPT);
1379 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "TSC_SCALING", VMX_PROC_CTLS2_TSC_SCALING);
1380 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "USER_WAIT_PAUSE", VMX_PROC_CTLS2_USER_WAIT_PAUSE);
1381 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENCLV_EXIT", VMX_PROC_CTLS2_ENCLV_EXIT);
1382}
1383
1384
1385/**
1386 * Reports MSR_IA32_VMX_ENTRY_CTLS to the log.
1387 *
1388 * @param pVmxMsr Pointer to the VMX MSR.
1389 */
1390static void hmR3VmxReportEntryCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1391{
1392 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1393 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1394 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVmxMsr->u));
1395 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_DEBUG", VMX_ENTRY_CTLS_LOAD_DEBUG);
1396 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "IA32E_MODE_GUEST", VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
1397 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ENTRY_TO_SMM", VMX_ENTRY_CTLS_ENTRY_TO_SMM);
1398 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "DEACTIVATE_DUAL_MON", VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
1399 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_ENTRY_CTLS_LOAD_PERF_MSR);
1400 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_ENTRY_CTLS_LOAD_PAT_MSR);
1401 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_ENTRY_CTLS_LOAD_EFER_MSR);
1402 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_BNDCFGS_MSR", VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR);
1403 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
1404 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_RTIT_CTL_MSR", VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR);
1405}
1406
1407
1408/**
1409 * Reports MSR_IA32_VMX_EXIT_CTLS to the log.
1410 *
1411 * @param pVmxMsr Pointer to the VMX MSR.
1412 */
1413static void hmR3VmxReportExitCtlsMsr(PCVMXCTLSMSR pVmxMsr)
1414{
1415 uint64_t const fAllowed1 = pVmxMsr->n.allowed1;
1416 uint64_t const fAllowed0 = pVmxMsr->n.allowed0;
1417 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVmxMsr->u));
1418 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_DEBUG", VMX_EXIT_CTLS_SAVE_DEBUG);
1419 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "HOST_ADDR_SPACE_SIZE", VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1420 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PERF_MSR", VMX_EXIT_CTLS_LOAD_PERF_MSR);
1421 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "ACK_EXT_INT", VMX_EXIT_CTLS_ACK_EXT_INT);
1422 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PAT_MSR", VMX_EXIT_CTLS_SAVE_PAT_MSR);
1423 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_PAT_MSR", VMX_EXIT_CTLS_LOAD_PAT_MSR);
1424 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_EFER_MSR", VMX_EXIT_CTLS_SAVE_EFER_MSR);
1425 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "LOAD_EFER_MSR", VMX_EXIT_CTLS_LOAD_EFER_MSR);
1426 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "SAVE_PREEMPT_TIMER", VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
1427 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_BNDCFGS_MSR", VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR);
1428 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CONCEAL_VMX_FROM_PT", VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT);
1429 HMVMX_REPORT_FEAT(fAllowed1, fAllowed0, "CLEAR_RTIT_CTL_MSR", VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR);
1430}
1431
1432
1433/**
1434 * Reports MSR_IA32_VMX_EPT_VPID_CAP MSR to the log.
1435 *
1436 * @param fCaps The VMX EPT/VPID capability MSR value.
1437 */
1438static void hmR3VmxReportEptVpidCapsMsr(uint64_t fCaps)
1439{
1440 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAP = %#RX64\n", fCaps));
1441 HMVMX_REPORT_MSR_CAP(fCaps, "RWX_X_ONLY", MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY);
1442 HMVMX_REPORT_MSR_CAP(fCaps, "PAGE_WALK_LENGTH_4", MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4);
1443 HMVMX_REPORT_MSR_CAP(fCaps, "EMT_UC", MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC);
1444 HMVMX_REPORT_MSR_CAP(fCaps, "EMT_WB", MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB);
1445 HMVMX_REPORT_MSR_CAP(fCaps, "PDE_2M", MSR_IA32_VMX_EPT_VPID_CAP_PDE_2M);
1446 HMVMX_REPORT_MSR_CAP(fCaps, "PDPTE_1G", MSR_IA32_VMX_EPT_VPID_CAP_PDPTE_1G);
1447 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT);
1448 HMVMX_REPORT_MSR_CAP(fCaps, "EPT_ACCESS_DIRTY", MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY);
1449 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT);
1450 HMVMX_REPORT_MSR_CAP(fCaps, "INVEPT_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1451 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID);
1452 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_INDIV_ADDR", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1453 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT);
1454 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_ALL_CONTEXTS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS);
1455 HMVMX_REPORT_MSR_CAP(fCaps, "INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS", MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS);
1456}
1457
1458
1459/**
1460 * Reports MSR_IA32_VMX_MISC MSR to the log.
1461 *
1462 * @param pVM Pointer to the VM.
1463 * @param fMisc The VMX misc. MSR value.
1464 */
1465static void hmR3VmxReportMiscMsr(PVM pVM, uint64_t fMisc)
1466{
1467 LogRel(("HM: MSR_IA32_VMX_MISC = %#RX64\n", fMisc));
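 /* The VMX-preemption timer counts down at the TSC rate right-shifted by this
 value, i.e. it ticks at TSC frequency / 2^shift. */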
1468 uint8_t const cPreemptTimerShift = RT_BF_GET(fMisc, VMX_BF_MISC_PREEMPT_TIMER_TSC);
1469 if (cPreemptTimerShift == pVM->hm.s.vmx.cPreemptTimerShift)
1470 LogRel(("HM: PREEMPT_TIMER_TSC = %#x\n", cPreemptTimerShift));
1471 else
1472 {
1473 LogRel(("HM: PREEMPT_TIMER_TSC = %#x - erratum detected, using %#x instead\n", cPreemptTimerShift,
1474 pVM->hm.s.vmx.cPreemptTimerShift));
1475 }
1476 LogRel(("HM: EXIT_SAVE_EFER_LMA = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_EXIT_SAVE_EFER_LMA)));
1477 LogRel(("HM: ACTIVITY_STATES = %#x%s\n", RT_BF_GET(fMisc, VMX_BF_MISC_ACTIVITY_STATES),
1478 hmR3VmxGetActivityStateAllDesc(fMisc)));
1479 LogRel(("HM: INTEL_PT = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_INTEL_PT)));
1480 LogRel(("HM: SMM_READ_SMBASE_MSR = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_SMM_READ_SMBASE_MSR)));
1481 LogRel(("HM: CR3_TARGET = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_CR3_TARGET)));
1482 LogRel(("HM: MAX_MSR = %#x ( %u )\n", RT_BF_GET(fMisc, VMX_BF_MISC_MAX_MSRS),
1483 VMX_MISC_MAX_MSRS(fMisc)));
1484 LogRel(("HM: VMXOFF_BLOCK_SMI = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMXOFF_BLOCK_SMI)));
1485 LogRel(("HM: VMWRITE_ALL = %RTbool\n", RT_BF_GET(fMisc, VMX_BF_MISC_VMWRITE_ALL)));
1486 LogRel(("HM: ENTRY_INJECT_SOFT_INT = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_ENTRY_INJECT_SOFT_INT)));
1487 LogRel(("HM: MSEG_ID = %#x\n", RT_BF_GET(fMisc, VMX_BF_MISC_MSEG_ID)));
1488}
1489
1490
1491/**
1492 * Reports MSR_IA32_VMX_VMCS_ENUM MSR to the log.
1493 *
1494 * @param uVmcsEnum The VMX VMCS enum MSR value.
1495 */
1496static void hmR3VmxReportVmcsEnumMsr(uint64_t uVmcsEnum)
1497{
1498 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %#RX64\n", uVmcsEnum));
1499 LogRel(("HM: HIGHEST_IDX = %#x\n", RT_BF_GET(uVmcsEnum, VMX_BF_VMCS_ENUM_HIGHEST_IDX)));
1500}
1501
1502
1503/**
1504 * Reports MSR_IA32_VMX_VMFUNC MSR to the log.
1505 *
1506 * @param uVmFunc The VMX VMFUNC MSR value.
1507 */
1508static void hmR3VmxReportVmFuncMsr(uint64_t uVmFunc)
1509{
1510 LogRel(("HM: MSR_IA32_VMX_VMFUNC = %#RX64\n", uVmFunc));
1511 HMVMX_REPORT_ALLOWED_FEAT(uVmFunc, "EPTP_SWITCHING", RT_BF_GET(uVmFunc, VMX_BF_VMFUNC_EPTP_SWITCHING));
1512}
1513
1514
1515/**
1516 * Reports VMX CR0, CR4 fixed MSRs.
1517 *
1518 * @param pMsrs Pointer to the VMX MSRs.
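 *
 * @note Illustrative semantics (not VBox code): a CR0/CR4 value is valid for VMX
 * operation when every bit set in FIXED0 is set and every bit clear in FIXED1
 * is clear, i.e. (uCr & uFixed0) == uFixed0 && (uCr | uFixed1) == uFixed1.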
1519 */
1520static void hmR3VmxReportCrFixedMsrs(PVMXMSRS pMsrs)
1521{
1522 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pMsrs->u64Cr0Fixed0));
1523 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pMsrs->u64Cr0Fixed1));
1524 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pMsrs->u64Cr4Fixed0));
1525 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pMsrs->u64Cr4Fixed1));
1526}
1527
1528
1529/**
1530 * Finish VT-x initialization (after ring-0 init).
1531 *
1532 * @returns VBox status code.
1533 * @param pVM The cross context VM structure.
1534 */
1535static int hmR3InitFinalizeR0Intel(PVM pVM)
1536{
1537 int rc;
1538
1539 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
1540 AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatCtrl != 0, VERR_HM_IPE_4);
1541
1542 LogRel(("HM: Using VT-x implementation 3.0\n"));
1543 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops));
1544 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
1545 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEfer));
1546 LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
1547
1548 hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.Msrs.u64FeatCtrl);
1549 hmR3VmxReportBasicMsr(pVM->hm.s.vmx.Msrs.u64Basic);
1550
1551 hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.PinCtls);
1552 hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.Msrs.ProcCtls);
1553 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1554 hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.Msrs.ProcCtls2);
1555
1556 hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.Msrs.EntryCtls);
1557 hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.Msrs.ExitCtls);
1558
1559 if (RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1560 {
1561 /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
1562 LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TruePinCtls));
1563 LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueProcCtls));
1564 LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueEntryCtls));
1565 LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.TrueExitCtls));
1566 }
1567
1568 hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.Msrs.u64Misc);
1569 hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.Msrs.u64VmcsEnum);
1570 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps)
1571 hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.Msrs.u64EptVpidCaps);
1572 if (pVM->hm.s.vmx.Msrs.u64VmFunc)
1573 hmR3VmxReportVmFuncMsr(pVM->hm.s.vmx.Msrs.u64VmFunc);
1574 hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.Msrs);
1575
1576 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1577 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1578 {
1579 PCVMXVMCSINFO pVmcsInfo = &pVM->aCpus[i].hm.s.vmx.VmcsInfo;
1580 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap));
1581 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVmcsInfo->HCPhysVmcs));
1582 }
1583#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1584 if (pVM->cpum.ro.GuestFeatures.fVmx)
1585 {
1586 LogRel(("HM: Nested-guest:\n"));
1587 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1588 {
1589 PCVMXVMCSINFO pVmcsInfoNstGst = &pVM->aCpus[i].hm.s.vmx.VmcsInfoNstGst;
1590 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVmcsInfoNstGst->HCPhysMsrBitmap));
1591 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVmcsInfoNstGst->HCPhysVmcs));
1592 }
1593 }
1594#endif
1595
1596 /*
1597 * EPT and unrestricted guest execution are determined in HMR3Init, verify the sanity of that.
1598 */
1599 AssertLogRelReturn( !pVM->hm.s.fNestedPaging
1600 || (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
1601 VERR_HM_IPE_1);
1602 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuest
1603 || ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
1604 && pVM->hm.s.fNestedPaging),
1605 VERR_HM_IPE_1);
1606
1607 /*
1608 * Disallow RDTSCP in the guest if there are no secondary processor-based VM-execution controls, as otherwise
1609 * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
1610 * with Nehalem and secondary VM-execution controls should be supported on all of them, but nonetheless it's Intel...
1611 */
1612 if ( !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1613 && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1614 {
1615 CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1616 LogRel(("HM: Disabled RDTSCP\n"));
1617 }
1618
1619 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
1620 {
1621 /* Allocate three pages for the TSS we need for real mode emulation (two of them for the IO bitmap). */
1622 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, hmR3VmmDevHeapNotify, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1623 if (RT_SUCCESS(rc))
1624 {
1625 /* The IO bitmap starts right after the virtual interrupt redirection bitmap.
1626 Refer Intel spec. 20.3.3 "Software Interrupt Handling in Virtual-8086 mode"
1627 esp. Figure 20-5. */
1628 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1629 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1630
1631 /* A bit set to 0 means software interrupts are redirected to the
1632 8086 program interrupt handler rather than switching to the
1633 protected-mode handler. */
1634 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1635
1636 /* Allow all port IO, so that port IO instructions do not cause
1637 exceptions but instead cause a VM-exit (based on VT-x's IO bitmap,
1638 which we currently configure to always cause an exit). */
1639 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
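 /* Terminate the I/O bitmap with an all-ones byte, as the Intel spec requires
 for the last byte of an I/O permission bitmap. */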
1640 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
1641
1642 /*
1643 * Construct a 1024 element page directory with 4 MB pages for the identity mapped
1644 * page table used in real and protected mode without paging with EPT.
1645 */
1646 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1647 for (uint32_t i = 0; i < X86_PG_ENTRIES; i++)
1648 {
1649 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1650 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1651 | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS
1652 | X86_PDE4M_G;
1653 }
1654
1655 /* We convert it here every time as PCI regions could be reconfigured. */
1656 if (PDMVmmDevHeapIsEnabled(pVM))
1657 {
1658 RTGCPHYS GCPhys;
1659 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1660 AssertRCReturn(rc, rc);
1661 LogRel(("HM: Real Mode TSS guest physaddr = %#RGp\n", GCPhys));
1662
1663 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1664 AssertRCReturn(rc, rc);
1665 LogRel(("HM: Non-Paging Mode EPT CR3 = %#RGp\n", GCPhys));
1666 }
1667 }
1668 else
1669 {
1670 LogRel(("HM: No real mode VT-x support (PDMR3VmmDevHeapAlloc returned %Rrc)\n", rc));
1671 pVM->hm.s.vmx.pRealModeTSS = NULL;
1672 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1673 return VMSetError(pVM, rc, RT_SRC_POS,
1674 "HM failure: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)", rc);
1675 }
1676 }
1677
1678 LogRel((pVM->hm.s.fAllow64BitGuests ? "HM: Guest support: 32-bit and 64-bit\n"
1679 : "HM: Guest support: 32-bit only\n"));
1680
1681 /*
1682 * Call ring-0 to set up the VM.
1683 */
1684 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
1685 if (rc != VINF_SUCCESS)
1686 {
1687 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
1688 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1689 {
1690 PVMCPU pVCpu = &pVM->aCpus[i];
1691 LogRel(("HM: CPU[%u] Last instruction error %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError));
1692 LogRel(("HM: CPU[%u] HM error %#x (%u)\n", i, pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError));
1693 }
1694 HMR3CheckError(pVM, rc);
1695 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1696 }
1697
1698 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
1699 LogRel(("HM: Enabled VMX\n"));
1700 pVM->hm.s.vmx.fEnabled = true;
1701
1702 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1703
1704 /*
1705 * Change the CPU features.
1706 */
1707 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1708 if (pVM->hm.s.fAllow64BitGuests)
1709 {
1710 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1711 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1712 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1713 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1714 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1715 }
1716 /* Turn on NXE if PAE has been enabled *and* the host has turned on NXE
1717 (we reuse the host EFER in the switcher). */
1718 /** @todo this needs to be fixed properly!! */
1719 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1720 {
1721 if (pVM->hm.s.vmx.u64HostMsrEfer & MSR_K6_EFER_NXE)
1722 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1723 else
1724 LogRel(("HM: NX not enabled on the host, unavailable to PAE guest\n"));
1725 }
1726
1727 /*
1728 * Log configuration details.
1729 */
1730 if (pVM->hm.s.fNestedPaging)
1731 {
1732 LogRel(("HM: Enabled nested paging\n"));
1733 if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
1734 LogRel(("HM: EPT flush type = Single context\n"));
1735 else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1736 LogRel(("HM: EPT flush type = All contexts\n"));
1737 else if (pVM->hm.s.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
1738 LogRel(("HM: EPT flush type = Not supported\n"));
1739 else
1740 LogRel(("HM: EPT flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushEpt));
1741
1742 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1743 LogRel(("HM: Enabled unrestricted guest execution\n"));
1744
1745#if HC_ARCH_BITS == 64
1746 if (pVM->hm.s.fLargePages)
1747 {
1748 /* Use large (2 MB) pages for our EPT PDEs where possible. */
1749 PGMSetLargePageUsage(pVM, true);
1750 LogRel(("HM: Enabled large page support\n"));
1751 }
1752#endif
1753 }
1754 else
1755 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
1756
1757 if (pVM->hm.s.vmx.fVpid)
1758 {
1759 LogRel(("HM: Enabled VPID\n"));
1760 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
1761 LogRel(("HM: VPID flush type = Individual addresses\n"));
1762 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
1763 LogRel(("HM: VPID flush type = Single context\n"));
1764 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1765 LogRel(("HM: VPID flush type = All contexts\n"));
1766 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1767 LogRel(("HM: VPID flush type = Single context retain globals\n"));
1768 else
1769 LogRel(("HM: VPID flush type = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpid));
1770 }
1771 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
1772 LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
1773
1774 if (pVM->hm.s.vmx.fUsePreemptTimer)
1775 LogRel(("HM: Enabled VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
1776 else
1777 LogRel(("HM: Disabled VMX-preemption timer\n"));
1778
1779 if (pVM->hm.s.fVirtApicRegs)
1780 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1781
1782 if (pVM->hm.s.fPostedIntrs)
1783 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1784
1785 if (pVM->hm.s.vmx.fUseVmcsShadowing)
1786 {
1787 bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
1788 LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
1789 }
1790
1791 return VINF_SUCCESS;
1792}
1793
1794
1795/**
1796 * Finish AMD-V initialization (after ring-0 init).
1797 *
1798 * @returns VBox status code.
1799 * @param pVM The cross context VM structure.
1800 */
1801static int hmR3InitFinalizeR0Amd(PVM pVM)
1802{
1803 Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1804
1805 LogRel(("HM: Using AMD-V implementation 2.0\n"));
1806
1807 uint32_t u32Family;
1808 uint32_t u32Model;
1809 uint32_t u32Stepping;
1810 if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
1811 LogRel(("HM: AMD CPU with erratum 170, family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
1812 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops));
1813 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
1814 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.svm.u32Rev));
1815 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.uMaxAsid));
1816 LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.svm.u32Features));
1817
1818 /*
1819 * Enumerate AMD-V features.
1820 */
1821 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
1822 {
1823#define HMSVM_REPORT_FEATURE(a_StrDesc, a_Define) { a_Define, a_StrDesc }
1824 HMSVM_REPORT_FEATURE("NESTED_PAGING", X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1825 HMSVM_REPORT_FEATURE("LBR_VIRT", X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT),
1826 HMSVM_REPORT_FEATURE("SVM_LOCK", X86_CPUID_SVM_FEATURE_EDX_SVM_LOCK),
1827 HMSVM_REPORT_FEATURE("NRIP_SAVE", X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE),
1828 HMSVM_REPORT_FEATURE("TSC_RATE_MSR", X86_CPUID_SVM_FEATURE_EDX_TSC_RATE_MSR),
1829 HMSVM_REPORT_FEATURE("VMCB_CLEAN", X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN),
1830 HMSVM_REPORT_FEATURE("FLUSH_BY_ASID", X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID),
1831 HMSVM_REPORT_FEATURE("DECODE_ASSISTS", X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS),
1832 HMSVM_REPORT_FEATURE("PAUSE_FILTER", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER),
1833 HMSVM_REPORT_FEATURE("PAUSE_FILTER_THRESHOLD", X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD),
1834 HMSVM_REPORT_FEATURE("AVIC", X86_CPUID_SVM_FEATURE_EDX_AVIC),
1835 HMSVM_REPORT_FEATURE("VIRT_VMSAVE_VMLOAD", X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD),
1836 HMSVM_REPORT_FEATURE("VGIF", X86_CPUID_SVM_FEATURE_EDX_VGIF),
1837#undef HMSVM_REPORT_FEATURE
1838 };
1839
1840 uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features;
1841 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
1842 if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
1843 {
1844 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName));
1845 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
1846 }
1847 if (fSvmFeatures)
1848 for (unsigned iBit = 0; iBit < 32; iBit++)
1849 if (RT_BIT_32(iBit) & fSvmFeatures)
1850 LogRel(("HM: Reserved bit %u\n", iBit));
1851
1852 /*
1853 * Nested paging is determined in HMR3Init, verify the sanity of that.
1854 */
1855 AssertLogRelReturn( !pVM->hm.s.fNestedPaging
1856 || (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1857 VERR_HM_IPE_1);
1858
1859#if 0
1860 /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
1861 * here. */
1862 if (RTR0IsPostIpiSupport())
1863 pVM->hm.s.fPostedIntrs = true;
1864#endif
1865
1866 /*
1867 * Call ring-0 to set up the VM.
1868 */
1869 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1870 if (rc != VINF_SUCCESS)
1871 {
1872 AssertMsgFailed(("%Rrc\n", rc));
1873 LogRel(("HM: AMD-V setup failed with rc=%Rrc!\n", rc));
1874 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1875 }
1876
1877 LogRel(("HM: Enabled SVM\n"));
1878 pVM->hm.s.svm.fEnabled = true;
1879
1880 if (pVM->hm.s.fNestedPaging)
1881 {
1882 LogRel(("HM: Enabled nested paging\n"));
1883
1884 /*
1885 * Enable large pages (2 MB) if applicable.
1886 */
1887#if HC_ARCH_BITS == 64
1888 if (pVM->hm.s.fLargePages)
1889 {
1890 PGMSetLargePageUsage(pVM, true);
1891 LogRel(("HM: Enabled large page support\n"));
1892 }
1893#endif
1894 }
1895
1896 if (pVM->hm.s.fVirtApicRegs)
1897 LogRel(("HM: Enabled APIC-register virtualization support\n"));
1898
1899 if (pVM->hm.s.fPostedIntrs)
1900 LogRel(("HM: Enabled posted-interrupt processing support\n"));
1901
1902 hmR3DisableRawMode(pVM);
1903
1904 /*
1905 * Change the CPU features.
1906 */
1907 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1908 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1909 if (pVM->hm.s.fAllow64BitGuests)
1910 {
1911 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1912 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1913 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1914 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1915 }
1916 /* Turn on NXE if PAE has been enabled. */
1917 else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1918 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1919
1920 LogRel((pVM->hm.s.fTprPatchingAllowed ? "HM: Enabled TPR patching\n"
1921 : "HM: Disabled TPR patching\n"));
1922
1923 LogRel((pVM->hm.s.fAllow64BitGuests ? "HM: Guest support: 32-bit and 64-bit\n"
1924 : "HM: Guest support: 32-bit only\n"));
1925 return VINF_SUCCESS;
1926}
1927
1928
1929/**
1930 * Applies relocations to data and code managed by this
1931 * component. This function will be called at init and
1932 * whenever the VMM needs to relocate itself inside the GC.
1933 *
1934 * @param pVM The cross context VM structure.
1935 */
1936VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1937{
1938 Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1939
1940 /* Fetch the current paging mode when the relocate callback is invoked during state loading. */
1941 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1942 {
1943 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1944 {
1945 PVMCPU pVCpu = &pVM->aCpus[i];
1946 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
1947 }
1948 }
1949#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
1950 if (HMIsEnabled(pVM))
1951 {
1952 switch (PGMGetHostMode(pVM))
1953 {
1954 case PGMMODE_32_BIT:
1955 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1956 break;
1957
1958 case PGMMODE_PAE:
1959 case PGMMODE_PAE_NX:
1960 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1961 break;
1962
1963 default:
1964 AssertFailed();
1965 break;
1966 }
1967 }
1968#endif
1969 return;
1970}
1971
1972
1973/**
1974 * Terminates the HM.
1975 *
1976 * Termination means cleaning up and freeing all resources,
1977 * the VM itself is, at this point, powered off or suspended.
1978 *
1979 * @returns VBox status code.
1980 * @param pVM The cross context VM structure.
1981 */
1982VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
1983{
1984 if (pVM->hm.s.vmx.pRealModeTSS)
1985 {
1986 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1987 pVM->hm.s.vmx.pRealModeTSS = NULL;
1988 }
1989 hmR3TermCPU(pVM);
1990 return VINF_SUCCESS;
1991}
1992
1993
1994/**
1995 * Terminates the per-VCPU HM.
1996 *
1997 * @returns VBox status code.
1998 * @param pVM The cross context VM structure.
1999 */
2000static int hmR3TermCPU(PVM pVM)
2001{
2002 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2003 {
2004 PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);
2005
2006#ifdef VBOX_WITH_STATISTICS
2007 if (pVCpu->hm.s.paStatExitReason)
2008 {
2009 MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
2010 pVCpu->hm.s.paStatExitReason = NULL;
2011 pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR;
2012 }
2013 if (pVCpu->hm.s.paStatInjectedIrqs)
2014 {
2015 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
2016 pVCpu->hm.s.paStatInjectedIrqs = NULL;
2017 pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
2018 }
2019# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
2020 if (pVCpu->hm.s.paStatNestedExitReason)
2021 {
2022 MMHyperFree(pVM, pVCpu->hm.s.paStatNestedExitReason);
2023 pVCpu->hm.s.paStatNestedExitReason = NULL;
2024 pVCpu->hm.s.paStatNestedExitReasonR0 = NIL_RTR0PTR;
2025 }
2026# endif
2027#endif
2028
2029#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2030 memset(pVCpu->hm.s.vmx.VmcsCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsCache.aMagic));
2031 pVCpu->hm.s.vmx.VmcsCache.uMagic = 0;
2032 pVCpu->hm.s.vmx.VmcsCache.uPos = 0xffffffff;
2033#endif
2034 }
2035 return VINF_SUCCESS;
2036}
2037
2038
2039/**
2040 * Resets a virtual CPU.
2041 *
2042 * Used by HMR3Reset and CPU hot plugging.
2043 *
2044 * @param pVCpu The cross context virtual CPU structure to reset.
2045 */
2046VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
2047{
2048 /* Sync the entire state on VM reset ring-0 re-entry. It's safe to reset
2049 the HM flags here; all other EMTs are in ring-3. See VMR3Reset(). */
2050 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
2051
2052 pVCpu->hm.s.fActive = false;
2053 pVCpu->hm.s.Event.fPending = false;
2054 pVCpu->hm.s.vmx.u64GstMsrApicBase = 0;
2055 pVCpu->hm.s.vmx.VmcsInfo.fSwitchedTo64on32 = false;
2056 pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode = true;
2057#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2058 if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
2059 {
2060 pVCpu->hm.s.vmx.VmcsInfoNstGst.fSwitchedTo64on32 = false;
2061 pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode = true;
2062 }
2063#endif
2064
2065 /* Reset the contents of the read cache. */
2066 PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache;
2067 for (unsigned j = 0; j < pVmcsCache->Read.cValidEntries; j++)
2068 pVmcsCache->Read.aFieldVal[j] = 0;
2069
2070#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2071 /* Magic marker for searching in crash dumps. */
2072 strcpy((char *)pVmcsCache->aMagic, "VMCSCACHE Magic");
2073 pVmcsCache->uMagic = UINT64_C(0xdeadbeefdeadbeef);
2074#endif
2075}
2076
2077
2078/**
2079 * The VM is being reset.
2080 *
2081 * For the HM component this means that any GDT/LDT/TSS monitors
2082 * need to be removed.
2083 *
2084 * @param pVM The cross context VM structure.
2085 */
2086VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
2087{
2088 LogFlow(("HMR3Reset:\n"));
2089
2090 if (HMIsEnabled(pVM))
2091 hmR3DisableRawMode(pVM);
2092
2093 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2094 {
2095 PVMCPU pVCpu = &pVM->aCpus[i];
2096
2097 HMR3ResetCpu(pVCpu);
2098 }
2099
2100 /* Clear all patch information. */
2101 pVM->hm.s.pGuestPatchMem = 0;
2102 pVM->hm.s.pFreeGuestPatchMem = 0;
2103 pVM->hm.s.cbGuestPatchMem = 0;
2104 pVM->hm.s.cPatches = 0;
2105 pVM->hm.s.PatchTree = 0;
2106 pVM->hm.s.fTPRPatchingActive = false;
2107 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
2108}
2109
2110
2111/**
2112 * Callback to remove TPR patches (restores the original instructions).
2113 *
2114 * @returns VBox strict status code.
2115 * @param pVM The cross context VM structure.
2116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2117 * @param pvUser The VCPU id (via uintptr_t) the original patch request was issued on.
2118 */
2119static DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
2120{
2121 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2122
2123 /* Only execute the handler on the VCPU on which the original patch request was issued. */
2124 if (pVCpu->idCpu != idCpu)
2125 return VINF_SUCCESS;
2126
2127 Log(("hmR3RemovePatches\n"));
2128 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
2129 {
2130 uint8_t abInstr[15];
2131 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
2132 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
2133 int rc;
2134
2135#ifdef LOG_ENABLED
2136 char szOutput[256];
2137 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2138 szOutput, sizeof(szOutput), NULL);
2139 if (RT_SUCCESS(rc))
2140 Log(("Patched instr: %s\n", szOutput));
2141#endif
2142
2143 /* Check if the instruction is still the same. */
2144 rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pInstrGC, pPatch->cbNewOp);
2145 if (rc != VINF_SUCCESS)
2146 {
2147 Log(("Patched code removed? (rc=%Rrc0\n", rc));
2148 continue; /* swapped out or otherwise removed; skip it. */
2149 }
2150
2151 if (memcmp(abInstr, pPatch->aNewOpcode, pPatch->cbNewOp))
2152 {
2153 Log(("Patched instruction was changed! (rc=%Rrc0\n", rc));
2154 continue; /* skip it. */
2155 }
2156
2157 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pInstrGC, pPatch->aOpcode, pPatch->cbOp);
2158 AssertRC(rc);
2159
2160#ifdef LOG_ENABLED
2161 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2162 szOutput, sizeof(szOutput), NULL);
2163 if (RT_SUCCESS(rc))
2164 Log(("Original instr: %s\n", szOutput));
2165#endif
2166 }
2167 pVM->hm.s.cPatches = 0;
2168 pVM->hm.s.PatchTree = 0;
2169 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
2170 pVM->hm.s.fTPRPatchingActive = false;
2171 return VINF_SUCCESS;
2172}
2173
2174
2175/**
2176 * Worker for enabling patching in a VT-x/AMD-V guest.
2177 *
2178 * @returns VBox status code.
2179 * @param pVM The cross context VM structure.
2180 * @param idCpu VCPU to execute hmR3RemovePatches on.
2181 * @param pPatchMem Patch memory range.
2182 * @param cbPatchMem Size of the memory range.
2183 */
2184static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
2185{
2186 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
2187 AssertRC(rc);
2188
2189 pVM->hm.s.pGuestPatchMem = pPatchMem;
2190 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
2191 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
2192 return VINF_SUCCESS;
2193}
2194
2195
2196/**
2197 * Enable patching in a VT-x/AMD-V guest.
2198 *
2199 * @returns VBox status code.
2200 * @param pVM The cross context VM structure.
2201 * @param pPatchMem Patch memory range.
2202 * @param cbPatchMem Size of the memory range.
2203 */
2204VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2205{
2206 VM_ASSERT_EMT(pVM);
2207 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2208 if (pVM->cCpus > 1)
2209 {
2210 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
2211 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
2212 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2213 AssertRC(rc);
2214 return rc;
2215 }
2216 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
2217}
2218
2219
2220/**
2221 * Disable patching in a VT-x/AMD-V guest.
2222 *
2223 * @returns VBox status code.
2224 * @param pVM The cross context VM structure.
2225 * @param pPatchMem Patch memory range.
2226 * @param cbPatchMem Size of the memory range.
2227 */
2228VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
2229{
2230 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
2231 RT_NOREF2(pPatchMem, cbPatchMem);
2232
2233 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
2234 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
2235
2236 /** @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
2237 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
2238 (void *)(uintptr_t)VMMGetCpuId(pVM));
2239 AssertRC(rc);
2240
2241 pVM->hm.s.pGuestPatchMem = 0;
2242 pVM->hm.s.pFreeGuestPatchMem = 0;
2243 pVM->hm.s.cbGuestPatchMem = 0;
2244 pVM->hm.s.fTPRPatchingActive = false;
2245 return VINF_SUCCESS;
2246}
2247
2248
2249/**
2250 * Callback to patch a TPR instruction (vmmcall or mov cr8).
2251 *
2252 * @returns VBox strict status code.
2253 * @param pVM The cross context VM structure.
2254 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2255 * @param pvUser The VCPU id (via uintptr_t) of the EMT the patch request was issued on.
2256 *
2257 */
2258static DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2259{
2260 /*
2261 * Only execute the handler on the VCPU on which the original patch request was
2262 * issued. (The other CPU(s) might not yet have switched to protected
2263 * mode, nor have the correct memory context.)
2264 */
2265 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2266 if (pVCpu->idCpu != idCpu)
2267 return VINF_SUCCESS;
2268
2269 /*
2270 * We're racing other VCPUs here, so don't try to patch the instruction twice
2271 * and make sure there is still room for our patch record.
2272 */
2273 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2274 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2275 if (pPatch)
2276 {
2277 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
2278 return VINF_SUCCESS;
2279 }
2280 uint32_t const idx = pVM->hm.s.cPatches;
2281 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2282 {
2283 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2284 return VINF_SUCCESS;
2285 }
2286 pPatch = &pVM->hm.s.aPatches[idx];
2287
2288 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2289
2290 /*
2291 * Disassemble the instruction and get cracking.
2292 */
2293 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3ReplaceTprInstr");
2294 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
2295 uint32_t cbOp;
2296 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
2297 AssertRC(rc);
2298 if ( rc == VINF_SUCCESS
2299 && pDis->pCurInstr->uOpcode == OP_MOV
2300 && cbOp >= 3)
2301 {
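 /* 0F 01 D9 = vmmcall; on the resulting #VMEXIT, ring-0 looks the patch record
 up by its key (the patched EIP) and emulates the TPR access. */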
2302 static uint8_t const s_abVMMCall[3] = { 0x0f, 0x01, 0xd9 };
2303
2304 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2305 AssertRC(rc);
2306
2307 pPatch->cbOp = cbOp;
2308
2309 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
2310 {
2311 /* write. */
2312 if (pDis->Param2.fUse == DISUSE_REG_GEN32)
2313 {
2314 pPatch->enmType = HMTPRINSTR_WRITE_REG;
2315 pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg;
2316 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
2317 }
2318 else
2319 {
2320 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
2321 pPatch->enmType = HMTPRINSTR_WRITE_IMM;
2322 pPatch->uSrcOperand = pDis->Param2.uValue;
2323 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
2324 }
2325 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2326 AssertRC(rc);
2327
2328 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2329 pPatch->cbNewOp = sizeof(s_abVMMCall);
2330 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2331 }
2332 else
2333 {
2334 /*
2335 * TPR Read.
2336 *
2337 * Found:
2338 * mov eax, dword [fffe0080] (5 bytes)
2339 * Check if next instruction is:
2340 * shr eax, 4
2341 */
2342 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
2343
2344 uint8_t const idxMmioReg = pDis->Param1.Base.idxGenReg;
2345 uint8_t const cbOpMmio = cbOp;
2346 uint64_t const uSavedRip = pCtx->rip;
2347
2348 pCtx->rip += cbOp;
2349 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
2350 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Following read");
2351 pCtx->rip = uSavedRip;
2352
2353 if ( rc == VINF_SUCCESS
2354 && pDis->pCurInstr->uOpcode == OP_SHR
2355 && pDis->Param1.fUse == DISUSE_REG_GEN32
2356 && pDis->Param1.Base.idxGenReg == idxMmioReg
2357 && pDis->Param2.fUse == DISUSE_IMMEDIATE8
2358 && pDis->Param2.uValue == 4
2359 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
2360 {
2361 uint8_t abInstr[15];
2362
2363 /* Replace the two instructions above with an AMD-V specific lock-prefixed 32-bit MOV CR8 instruction,
2364 allowing CR8 to be accessed in 32-bit mode without causing a #VMEXIT. */
2365 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pPatch->aOpcode, pCtx->rip, cbOpMmio + cbOp);
2366 AssertRC(rc);
2367
2368 pPatch->cbOp = cbOpMmio + cbOp;
2369
2370 /* 0xf0, 0x0f, 0x20, 0xc0 = mov eax, cr8 */
2371 abInstr[0] = 0xf0;
2372 abInstr[1] = 0x0f;
2373 abInstr[2] = 0x20;
2374 abInstr[3] = 0xc0 | pDis->Param1.Base.idxGenReg;
2375 for (unsigned i = 4; i < pPatch->cbOp; i++)
2376 abInstr[i] = 0x90; /* nop */
2377
2378 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, abInstr, pPatch->cbOp);
2379 AssertRC(rc);
2380
2381 memcpy(pPatch->aNewOpcode, abInstr, pPatch->cbOp);
2382 pPatch->cbNewOp = pPatch->cbOp;
2383 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessCr8);
2384
2385 Log(("Acceptable read/shr candidate!\n"));
2386 pPatch->enmType = HMTPRINSTR_READ_SHR4;
2387 }
2388 else
2389 {
2390 pPatch->enmType = HMTPRINSTR_READ;
2391 pPatch->uDstOperand = idxMmioReg;
2392
2393 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
2394 AssertRC(rc);
2395
2396 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
2397 pPatch->cbNewOp = sizeof(s_abVMMCall);
2398 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccessVmc);
2399 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
2400 }
2401 }
2402
2403 pPatch->Core.Key = pCtx->eip;
2404 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2405 AssertRC(rc);
2406
2407 pVM->hm.s.cPatches++;
2408 return VINF_SUCCESS;
2409 }
2410
2411 /*
2412 * Save invalid patch, so we will not try again.
2413 */
2414 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
2415 pPatch->Core.Key = pCtx->eip;
2416 pPatch->enmType = HMTPRINSTR_INVALID;
2417 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2418 AssertRC(rc);
2419 pVM->hm.s.cPatches++;
2420 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2421 return VINF_SUCCESS;
2422}
2423
2424
2425/**
2426 * Callback to patch a TPR instruction (jump to generated code).
2427 *
2428 * @returns VBox strict status code.
2429 * @param pVM The cross context VM structure.
2430 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2431 * @param pvUser The VCPU id (via uintptr_t) of the EMT the patch request was issued on.
2432 *
2433 */
2434static DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2435{
2436 /*
2437 * Only execute the handler on the VCPU on which the original patch request was
2438 * issued. (The other CPU(s) might not yet have switched to protected
2439 * mode, nor have the correct memory context.)
2440 */
2441 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
2442 if (pVCpu->idCpu != idCpu)
2443 return VINF_SUCCESS;
2444
2445 /*
2446 * We're racing other VCPUs here, so don't try to patch the instruction twice
2447 * and make sure there is still room for our patch record.
2448 */
2449 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2450 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2451 if (pPatch)
2452 {
2453 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
2454 return VINF_SUCCESS;
2455 }
2456 uint32_t const idx = pVM->hm.s.cPatches;
2457 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2458 {
2459 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
2460 return VINF_SUCCESS;
2461 }
2462 pPatch = &pVM->hm.s.aPatches[idx];
2463
2464 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
2465 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "hmR3PatchTprInstr");
2466
2467 /*
2468 * Disassemble the instruction and get cracking.
2469 */
2470 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
2471 uint32_t cbOp;
2472 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
2473 AssertRC(rc);
2474 if ( rc == VINF_SUCCESS
2475 && pDis->pCurInstr->uOpcode == OP_MOV
2476 && cbOp >= 5)
2477 {
2478 uint8_t aPatch[64];
2479 uint32_t off = 0;
2480
2481 rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
2482 AssertRC(rc);
2483
2484 pPatch->cbOp = cbOp;
2485 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
2486
2487 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
2488 {
2489 /*
2490 * TPR write:
2491 *
2492 * push ECX [51]
2493 * push EDX [52]
2494 * push EAX [50]
2495 * xor EDX,EDX [31 D2]
2496 * mov EAX,EAX [89 C0]
2497 * or
2498 * mov EAX,0000000CCh [B8 CC 00 00 00]
2499 * mov ECX,0C0000082h [B9 82 00 00 C0]
2500 * wrmsr [0F 30]
2501 * pop EAX [58]
2502 * pop EDX [5A]
2503 * pop ECX [59]
2504 * jmp return_address [E9 return_address]
2505 */
2506 bool fUsesEax = (pDis->Param2.fUse == DISUSE_REG_GEN32 && pDis->Param2.Base.idxGenReg == DISGREG_EAX);
2507
2508 aPatch[off++] = 0x51; /* push ecx */
2509 aPatch[off++] = 0x52; /* push edx */
2510 if (!fUsesEax)
2511 aPatch[off++] = 0x50; /* push eax */
2512 aPatch[off++] = 0x31; /* xor edx, edx */
2513 aPatch[off++] = 0xd2;
2514 if (pDis->Param2.fUse == DISUSE_REG_GEN32)
2515 {
2516 if (!fUsesEax)
2517 {
2518 aPatch[off++] = 0x89; /* mov eax, src_reg */
2519 aPatch[off++] = MAKE_MODRM(3, pDis->Param2.Base.idxGenReg, DISGREG_EAX);
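 /* ModR/M byte: mod=11b (register direct), reg=source register, r/m=EAX (the destination). */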
2520 }
2521 }
2522 else
2523 {
2524 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
2525 aPatch[off++] = 0xb8; /* mov eax, immediate */
2526 *(uint32_t *)&aPatch[off] = pDis->Param2.uValue;
2527 off += sizeof(uint32_t);
2528 }
2529 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2530 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2531 off += sizeof(uint32_t);
2532
2533 aPatch[off++] = 0x0f; /* wrmsr */
2534 aPatch[off++] = 0x30;
2535 if (!fUsesEax)
2536 aPatch[off++] = 0x58; /* pop eax */
2537 aPatch[off++] = 0x5a; /* pop edx */
2538 aPatch[off++] = 0x59; /* pop ecx */
2539 }
2540 else
2541 {
2542 /*
2543 * TPR read:
2544 *
2545 * push ECX [51]
2546 * push EDX [52]
2547 * push EAX [50]
 * xor EDX,EDX [31 D2]
2548 * mov ECX,0C0000082h [B9 82 00 00 C0]
2549 * rdmsr [0F 32]
2550 * mov EAX,EAX [89 C0]
2551 * pop EAX [58]
2552 * pop EDX [5A]
2553 * pop ECX [59]
2554 * jmp return_address [E9 return_address]
2555 */
2556 Assert(pDis->Param1.fUse == DISUSE_REG_GEN32);
2557
2558 if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
2559 aPatch[off++] = 0x51; /* push ecx */
2560 if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
2561 aPatch[off++] = 0x52; /* push edx */
2562 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2563 aPatch[off++] = 0x50; /* push eax */
2564
2565 aPatch[off++] = 0x31; /* xor edx, edx */
2566 aPatch[off++] = 0xd2;
2567
2568 aPatch[off++] = 0xb9; /* mov ecx, 0xc0000082 */
2569 *(uint32_t *)&aPatch[off] = MSR_K8_LSTAR;
2570 off += sizeof(uint32_t);
2571
2572 aPatch[off++] = 0x0f; /* rdmsr */
2573 aPatch[off++] = 0x32;
2574
2575 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2576 {
2577 aPatch[off++] = 0x89; /* mov dst_reg, eax */
2578 aPatch[off++] = MAKE_MODRM(3, DISGREG_EAX, pDis->Param1.Base.idxGenReg);
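 /* ModR/M byte: mod=11b (register direct), reg=EAX (the source), r/m=destination register. */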
2579 }
2580
2581 if (pDis->Param1.Base.idxGenReg != DISGREG_EAX)
2582 aPatch[off++] = 0x58; /* pop eax */
2583 if (pDis->Param1.Base.idxGenReg != DISGREG_EDX )
2584 aPatch[off++] = 0x5a; /* pop edx */
2585 if (pDis->Param1.Base.idxGenReg != DISGREG_ECX)
2586 aPatch[off++] = 0x59; /* pop ecx */
2587 }
2588 aPatch[off++] = 0xe9; /* jmp return_address */
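 /* rel32 is relative to the byte following the jmp: target (the instruction after
 the patched code) minus (the address of the rel32 field + 4). */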
2589 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2590 off += sizeof(RTRCUINTPTR);
2591
2592 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2593 {
2594 /* Write new code to the patch buffer. */
2595 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2596 AssertRC(rc);
2597
2598#ifdef LOG_ENABLED
2599 uint32_t cbCurInstr;
2600 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2601 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2602 GCPtrInstr += RT_MAX(cbCurInstr, 1))
2603 {
2604 char szOutput[256];
2605 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2606 szOutput, sizeof(szOutput), &cbCurInstr);
2607 if (RT_SUCCESS(rc))
2608 Log(("Patch instr %s\n", szOutput));
2609 else
2610 Log(("%RGv: rc=%Rrc\n", GCPtrInstr, rc));
2611 }
2612#endif
2613
2614 pPatch->aNewOpcode[0] = 0xE9;
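 /* Jump from the patched location into the patch buffer: rel32 = patch buffer address - (eip + 5). */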
2615 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2616
2617 /* Overwrite the TPR instruction with a jump. */
2618 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->eip, pPatch->aNewOpcode, 5);
2619 AssertRC(rc);
2620
2621 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Jump");
2622
2623 pVM->hm.s.pFreeGuestPatchMem += off;
2624 pPatch->cbNewOp = 5;
2625
2626 pPatch->Core.Key = pCtx->eip;
2627 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2628 AssertRC(rc);
2629
2630 pVM->hm.s.cPatches++;
2631 pVM->hm.s.fTPRPatchingActive = true;
2632 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2633 return VINF_SUCCESS;
2634 }
2635
2636 Log(("Ran out of space in our patch buffer!\n"));
2637 }
2638 else
2639 Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
2640
2641
2642 /*
2643 * Save invalid patch, so we will not try again.
2644 */
2645 pPatch = &pVM->hm.s.aPatches[idx];
2646 pPatch->Core.Key = pCtx->eip;
2647 pPatch->enmType = HMTPRINSTR_INVALID;
2648 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2649 AssertRC(rc);
2650 pVM->hm.s.cPatches++;
2651 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2652 return VINF_SUCCESS;
2653}
2654
2655
2656/**
2657 * Attempt to patch TPR MMIO instructions.
2658 *
2659 * @returns VBox status code.
2660 * @param pVM The cross context VM structure.
2661 * @param pVCpu The cross context virtual CPU structure.
2662 */
2663VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
2664{
2665 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2666 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2667 (void *)(uintptr_t)pVCpu->idCpu);
2668 AssertRC(rc);
2669 return rc;
2670}
2671
2672
2673/**
2674 * Checks if we need to reschedule due to VMM device heap changes.
2675 *
2676 * @returns true if a reschedule is required, otherwise false.
2677 * @param pVM The cross context VM structure.
2678 * @param pCtx VM execution context.
2679 */
2680VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx)
2681{
2682 /*
2683 * The VMM device heap is a requirement for emulating real-mode or protected-mode without paging
2684 * when the unrestricted guest execution feature is missing (VT-x only).
2685 */
2686 if ( pVM->hm.s.vmx.fEnabled
2687 && !pVM->hm.s.vmx.fUnrestrictedGuest
2688 && CPUMIsGuestInRealModeEx(pCtx)
2689 && !PDMVmmDevHeapIsEnabled(pVM))
2690 return true;
2691
2692 return false;
2693}
2694
2695
2696/**
2697 * Notification callback from DBGF when interrupt breakpoints or generic debug
2698 * event settings change.
2699 *
2700 * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards; this
2701 * function just updates the VM globals.
2702 *
2703 * @param pVM The cross context VM structure.
2704 * @thread EMT(0)
2705 */
2706VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
2707{
2708 /* Interrupts. */
2709 bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
2710 || pVM->dbgf.ro.cHardIntBreakpoints > 0;
2711
2712 /* CPU Exceptions. */
2713 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
2714 !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
2715 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2716 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2717
2718 /* Common VM exits. */
2719 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
2720 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
2721 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2722 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2723
2724 /* Vendor specific VM exits. */
2725 if (HMR3IsVmxEnabled(pVM->pUVM))
2726 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
2727 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
2728 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2729 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2730 else
2731 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
2732 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
2733 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
2734 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
2735
2736 /* Done. */
2737 pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
2738}
2739
2740
2741/**
2742 * Follow up notification callback to HMR3NotifyDebugEventChanged for each CPU.
2743 *
2744 * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
2745 * per-CPU settings.
2746 *
2747 * @param pVM The cross context VM structure.
2748 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2749 */
2750VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
2751{
2752 pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
2753}
2754
2755
2756/**
2757 * Checks if we are currently using hardware acceleration.
2758 *
2759 * @returns true if hardware acceleration is being used, otherwise false.
2760 * @param pVCpu The cross context virtual CPU structure.
2761 */
2762VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu)
2763{
2764 return pVCpu->hm.s.fActive;
2765}
2766
2767
2768/**
2769 * External interface for querying whether hardware acceleration is enabled.
2770 *
2771 * @returns true if VT-x or AMD-V is being used, otherwise false.
2772 * @param pUVM The user mode VM handle.
2773 * @sa HMIsEnabled, HMIsEnabledNotMacro.
2774 */
2775VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM)
2776{
2777 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2778 PVM pVM = pUVM->pVM;
2779 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2780 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very very early. */
2781}
2782
2783
2784/**
2785 * External interface for querying whether VT-x is being used.
2786 *
2787 * @returns true if VT-x is being used, otherwise false.
2788 * @param pUVM The user mode VM handle.
2789 * @sa HMR3IsSvmEnabled, HMIsEnabled
2790 */
2791VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM)
2792{
2793 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2794 PVM pVM = pUVM->pVM;
2795 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2796 return pVM->hm.s.vmx.fEnabled
2797 && pVM->hm.s.vmx.fSupported
2798 && pVM->fHMEnabled;
2799}
2800
2801
2802/**
2803 * External interface for querying whether AMD-V is being used.
2804 *
2805 * @returns true if AMD-V is being used, otherwise false.
2806 * @param pUVM The user mode VM handle.
2807 * @sa HMR3IsVmxEnabled, HMIsEnabled
2808 */
2809VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM)
2810{
2811 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2812 PVM pVM = pUVM->pVM;
2813 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2814 return pVM->hm.s.svm.fEnabled
2815 && pVM->hm.s.svm.fSupported
2816 && pVM->fHMEnabled;
2817}
2818
2819
2820/**
2821 * Checks if we are currently using nested paging.
2822 *
2823 * @returns true if nested paging is being used, otherwise false.
2824 * @param pUVM The user mode VM handle.
2825 */
2826VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM)
2827{
2828 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2829 PVM pVM = pUVM->pVM;
2830 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2831 return pVM->hm.s.fNestedPaging;
2832}
2833
2834
2835/**
2836 * Checks if virtualized APIC registers are enabled.
2837 *
2838 * When enabled, this feature allows the hardware to access most of the
2839 * APIC registers in the virtual-APIC page without causing VM-exits. See
2840 * Intel spec. 29.1.1 "Virtualized APIC Registers".
2841 *
2842 * @returns true if virtualized APIC registers are enabled, otherwise
2843 * false.
2844 * @param pUVM The user mode VM handle.
2845 */
2846VMMR3DECL(bool) HMR3IsVirtApicRegsEnabled(PUVM pUVM)
2847{
2848 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2849 PVM pVM = pUVM->pVM;
2850 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2851 return pVM->hm.s.fVirtApicRegs;
2852}
2853
2854
2855/**
2856 * Checks if APIC posted-interrupt processing is enabled.
2857 *
2858 * This returns whether we can deliver interrupts to the guest without
2859 * leaving guest-context by updating APIC state from host-context.
2860 *
2861 * @returns true if APIC posted-interrupt processing is enabled,
2862 * otherwise false.
2863 * @param pUVM The user mode VM handle.
2864 */
2865VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM)
2866{
2867 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2868 PVM pVM = pUVM->pVM;
2869 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2870 return pVM->hm.s.fPostedIntrs;
2871}
2872
2873
2874/**
2875 * Checks if we are currently using VPID in VT-x mode.
2876 *
2877 * @returns true if VPID is being used, otherwise false.
2878 * @param pUVM The user mode VM handle.
2879 */
2880VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM)
2881{
2882 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2883 PVM pVM = pUVM->pVM;
2884 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2885 return pVM->hm.s.vmx.fVpid;
2886}
2887
2888
2889/**
2890 * Checks if we are currently using VT-x unrestricted execution,
2891 * aka UX.
2892 *
2893 * @returns true if UX is being used, otherwise false.
2894 * @param pUVM The user mode VM handle.
2895 */
2896VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM)
2897{
2898 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2899 PVM pVM = pUVM->pVM;
2900 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2901 return pVM->hm.s.vmx.fUnrestrictedGuest
2902 || pVM->hm.s.svm.fSupported;
2903}
2904
2905
2906/**
2907 * Checks if the VMX-preemption timer is being used.
2908 *
2909 * @returns true if the VMX-preemption timer is being used, otherwise false.
2910 * @param pVM The cross context VM structure.
2911 */
2912VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2913{
2914 return HMIsEnabled(pVM)
2915 && pVM->hm.s.vmx.fEnabled
2916 && pVM->hm.s.vmx.fUsePreemptTimer;
2917}
2918
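/*
 * Editor's note: an illustrative sketch (not part of the build) combining the
 * ring-3 query APIs above; the reporting function itself is hypothetical.
 */
#if 0
static void hmSketchLogCaps(PUVM pUVM)
{
    LogRel(("HM: enabled=%RTbool VT-x=%RTbool AMD-V=%RTbool\n",
            HMR3IsEnabled(pUVM), HMR3IsVmxEnabled(pUVM), HMR3IsSvmEnabled(pUVM)));
    LogRel(("HM: nested-paging=%RTbool vpid=%RTbool unrestricted-exec=%RTbool\n",
            HMR3IsNestedPagingActive(pUVM), HMR3IsVpidActive(pUVM), HMR3IsUXActive(pUVM)));
}
#endif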
2919
2920/**
2921 * Helper for HMR3CheckError to log VMCS controls to the release log.
2922 *
2923 * @param idCpu The Virtual CPU ID.
2924 * @param pVmcsInfo The VMCS info. object.
2925 */
2926static void hmR3CheckErrorLogVmcsCtls(VMCPUID idCpu, PCVMXVMCSINFO pVmcsInfo)
2927{
2928 LogRel(("HM: CPU[%u] PinCtls %#RX32\n", idCpu, pVmcsInfo->u32PinCtls));
2929 {
2930 uint32_t const u32Val = pVmcsInfo->u32PinCtls;
2931 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
2932 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
2933 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_VIRT_NMI );
2934 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_PREEMPT_TIMER);
2935 HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
2936 }
2937 LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls));
2938 {
2939 uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
2940 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
2941 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
2942 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_HLT_EXIT );
2943 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INVLPG_EXIT );
2944 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MWAIT_EXIT );
2945 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDPMC_EXIT );
2946 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_RDTSC_EXIT );
2947 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_LOAD_EXIT );
2948 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR3_STORE_EXIT );
2949 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_LOAD_EXIT );
2950 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_CR8_STORE_EXIT );
2951 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TPR_SHADOW );
2952 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_NMI_WINDOW_EXIT );
2953 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MOV_DR_EXIT );
2954 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_UNCOND_IO_EXIT );
2955 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_IO_BITMAPS );
2956 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_TRAP_FLAG );
2957 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_MSR_BITMAPS );
2958 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_MONITOR_EXIT );
2959 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_PAUSE_EXIT );
2960 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2961 }
2962 LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", idCpu, pVmcsInfo->u32ProcCtls2));
2963 {
2964 uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
2965 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
2966 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
2967 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_DESC_TABLE_EXIT );
2968 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDTSCP );
2969 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_X2APIC_MODE );
2970 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VPID );
2971 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_WBINVD_EXIT );
2972 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_UNRESTRICTED_GUEST );
2973 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_APIC_REG_VIRT );
2974 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_INT_DELIVERY );
2975 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT );
2976 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDRAND_EXIT );
2977 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_INVPCID );
2978 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMFUNC );
2979 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VMCS_SHADOWING );
2980 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLS_EXIT );
2981 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_RDSEED_EXIT );
2982 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PML );
2983 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT_VE );
2984 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_CONCEAL_VMX_FROM_PT);
2985 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_XSAVES_XRSTORS );
2986 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_MODE_BASED_EPT_PERM);
2987 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_SPPTP_EPT );
2988 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_PT_EPT );
2989 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
2990 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_USER_WAIT_PAUSE );
2991 HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_ENCLV_EXIT );
2992 }
2993 LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", idCpu, pVmcsInfo->u32EntryCtls));
2994 {
2995 uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
2996 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
2997 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
2998 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_ENTRY_TO_SMM );
2999 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON);
3000 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PERF_MSR );
3001 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_PAT_MSR );
3002 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
3003 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR );
3004 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT);
3005 HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_RTIT_CTL_MSR );
3006 }
3007 LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", idCpu, pVmcsInfo->u32ExitCtls));
3008 {
3009 uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
3010 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
3011 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
3012 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PERF_MSR );
3013 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_ACK_EXT_INT );
3014 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PAT_MSR );
3015 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_PAT_MSR );
3016 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_EFER_MSR );
3017 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_LOAD_EFER_MSR );
3018 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER );
3019 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR );
3020 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT );
3021 HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_CLEAR_RTIT_CTL_MSR );
3022 }
3023}
3024
3025
3026/**
3027 * Checks a fatal VT-x/AMD-V error and produces a meaningful
3028 * release log message.
3029 *
3030 * @param pVM The cross context VM structure.
3031 * @param iStatusCode VBox status code.
3032 */
3033VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
3034{
3035 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3036 {
3037 /** @todo r=ramshankar: Are all EMTs out of ring-0 at this point!? If not, we
3038 * might be getting inaccurate values for non-guru'ing EMTs. */
3039 PVMCPU pVCpu = &pVM->aCpus[i];
3040 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
3041 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
3042 switch (iStatusCode)
3043 {
3044 case VERR_VMX_INVALID_VMCS_PTR:
3045 {
3046 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
3047 LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
3048 LogRel(("HM: CPU[%u] Current pointer %#RHp vs %#RHp\n", i, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
3049 pVmcsInfo->HCPhysVmcs));
3050 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", i, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
3051 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3052 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3053 break;
3054 }
3055
3056 case VERR_VMX_UNABLE_TO_START_VM:
3057 {
3058 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
3059 LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
3060 LogRel(("HM: CPU[%u] Instruction error %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError));
3061 LogRel(("HM: CPU[%u] Exit reason %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));
3062
3063 if ( pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
3064 || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
3065 {
3066 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
3067 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
3068 }
3069 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
3070 {
3071 hmR3CheckErrorLogVmcsCtls(i, pVmcsInfo);
3072 LogRel(("HM: CPU[%u] HCPhysMsrBitmap %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap));
3073 LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad %#RHp\n", i, pVmcsInfo->HCPhysGuestMsrLoad));
3074 LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", i, pVmcsInfo->HCPhysGuestMsrStore));
3075 LogRel(("HM: CPU[%u] HCPhysHostMsrLoad %#RHp\n", i, pVmcsInfo->HCPhysHostMsrLoad));
3076 LogRel(("HM: CPU[%u] cEntryMsrLoad %u\n", i, pVmcsInfo->cEntryMsrLoad));
3077 LogRel(("HM: CPU[%u] cExitMsrStore %u\n", i, pVmcsInfo->cExitMsrStore));
3078 LogRel(("HM: CPU[%u] cExitMsrLoad %u\n", i, pVmcsInfo->cExitMsrLoad));
3079 }
3080 /** @todo Log VM-entry event injection control fields
3081 * VMX_VMCS_CTRL_ENTRY_IRQ_INFO, VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE
3082 * and VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH from the VMCS. */
3083 break;
3084 }
3085
3086 case VERR_VMX_INVALID_GUEST_STATE:
3087 {
3088 LogRel(("HM: VERR_VMX_INVALID_GUEST_STATE:\n"));
3089 hmR3CheckErrorLogVmcsCtls(i, pVmcsInfo);
3090 break;
3091 }
3092
3093 /* The guru will dump the HM error and exit history. Nothing extra to report for these errors. */
3094 case VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO:
3095 case VERR_VMX_INVALID_VMXON_PTR:
3096 case VERR_VMX_UNEXPECTED_EXIT:
3097 case VERR_VMX_INVALID_VMCS_FIELD:
3098 case VERR_SVM_UNKNOWN_EXIT:
3099 case VERR_SVM_UNEXPECTED_EXIT:
3100 case VERR_SVM_UNEXPECTED_PATCH_TYPE:
3101 case VERR_SVM_UNEXPECTED_XCPT_EXIT:
3102 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE:
3103 break;
3104 }
3105 }
3106
3107 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
3108 {
3109 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1 %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1));
3110 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0 %#RX32\n", pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0));
3111 }
3112 else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
3113 LogRel(("HM: HCPhysVmxEnableError = %#RHp\n", pVM->hm.s.vmx.HCPhysVmxEnableError));
3114}
3115
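/*
 * Editor's note: background for the allowed-1/allowed-0 values logged above,
 * per the Intel SDM (Vol. 3, Appendix A). A control bit must be 1 if its
 * allowed-0 bit is set and may be 1 only if its allowed-1 bit is set, so a
 * control dword is valid iff the check below holds. The helper is a sketch,
 * not code from this file.
 */
#if 0
static bool hmSketchIsCtlsValid(uint32_t fCtls, uint32_t fAllowed0, uint32_t fAllowed1)
{
    return (fCtls & fAllowed0) == fAllowed0 /* all mandatory-1 bits present */
        && (fCtls & ~fAllowed1) == 0;       /* no bits outside the allowed-1 mask */
}
#endif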
3116
3117/**
3118 * Execute state save operation.
3119 *
3120 * Save only data that cannot be re-loaded while entering HM ring-0 code. This
3121 * is because we always save the VM state from ring-3 and thus most HM state
3122 * will be re-synced dynamically at runtime and doesn't need to be part of the VM
3123 * saved state.
3124 *
3125 * @returns VBox status code.
3126 * @param pVM The cross context VM structure.
3127 * @param pSSM SSM operation handle.
3128 */
3129static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3130{
3131 int rc;
3132
3133 Log(("hmR3Save:\n"));
3134
3135 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3136 {
3137 Assert(!pVM->aCpus[i].hm.s.Event.fPending);
3138 if (pVM->cpum.ro.GuestFeatures.fSvm)
3139 {
3140 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache;
3141 rc = SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
3142 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx);
3143 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx);
3144 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx);
3145 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrDRx);
3146 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterThreshold);
3147 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16PauseFilterCount);
3148 rc |= SSMR3PutU32(pSSM, pVmcbNstGstCache->u32InterceptXcpt);
3149 rc |= SSMR3PutU64(pSSM, pVmcbNstGstCache->u64InterceptCtrl);
3150 rc |= SSMR3PutU64(pSSM, pVmcbNstGstCache->u64TSCOffset);
3151 rc |= SSMR3PutBool(pSSM, pVmcbNstGstCache->fVIntrMasking);
3152 rc |= SSMR3PutBool(pSSM, pVmcbNstGstCache->fNestedPaging);
3153 rc |= SSMR3PutBool(pSSM, pVmcbNstGstCache->fLbrVirt);
3154 AssertRCReturn(rc, rc);
3155 }
3156 }
3157
3158 /* Save the guest patch data. */
3159 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3160 rc |= SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3161 rc |= SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3162
3163 /* Store all the guest patch records too. */
3164 rc |= SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3165 AssertRCReturn(rc, rc);
3166
3167 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3168 {
3169 AssertCompileSize(HMTPRINSTR, 4);
3170 PCHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3171 rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
3172 rc |= SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3173 rc |= SSMR3PutU32(pSSM, pPatch->cbOp);
3174 rc |= SSMR3PutMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3175 rc |= SSMR3PutU32(pSSM, pPatch->cbNewOp);
3176 rc |= SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
3177 rc |= SSMR3PutU32(pSSM, pPatch->uSrcOperand);
3178 rc |= SSMR3PutU32(pSSM, pPatch->uDstOperand);
3179 rc |= SSMR3PutU32(pSSM, pPatch->pJumpTarget);
3180 rc |= SSMR3PutU32(pSSM, pPatch->cFaults);
3181 AssertRCReturn(rc, rc);
3182 }
3183
3184 return VINF_SUCCESS;
3185}
3186
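/*
 * Editor's note: summary of the saved-state unit layout produced by hmR3Save
 * (derived from the SSMR3Put* calls above; hmR3Load below reads the same
 * sequence back):
 *   - Per VCPU, only when SVM is exposed to the guest: bool fCacheValid,
 *     6 x uint16_t intercept/pause-filter fields, uint32_t u32InterceptXcpt,
 *     2 x uint64_t (intercept ctrl, TSC offset), 3 x bool (V_INTR masking,
 *     nested paging, LBR virt).
 *   - Global: two GC pointers and a size for the patch memory, the patch
 *     count, then one record per patch (key, old/new opcodes and sizes, type,
 *     operands, jump target, fault count).
 */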
3187
3188/**
3189 * Execute state load operation.
3190 *
3191 * @returns VBox status code.
3192 * @param pVM The cross context VM structure.
3193 * @param pSSM SSM operation handle.
3194 * @param uVersion Data layout version.
3195 * @param uPass The data pass.
3196 */
3197static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3198{
3199 int rc;
3200
3201 LogFlowFunc(("uVersion=%u\n", uVersion));
3202 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
3203
3204 /*
3205 * Validate version.
3206 */
3207 if ( uVersion != HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT
3208 && uVersion != HM_SAVED_STATE_VERSION_TPR_PATCHING
3209 && uVersion != HM_SAVED_STATE_VERSION_NO_TPR_PATCHING
3210 && uVersion != HM_SAVED_STATE_VERSION_2_0_X)
3211 {
3212 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
3213 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3214 }
3215
3216 /*
3217 * Load per-VCPU state.
3218 */
3219 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3220 {
3221 if (uVersion >= HM_SAVED_STATE_VERSION_SVM_NESTED_HWVIRT)
3222 {
3223 /* Load the SVM nested hw.virt state if the VM is configured for it. */
3224 if (pVM->cpum.ro.GuestFeatures.fSvm)
3225 {
3226 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache;
3227 rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
3228 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx);
3229 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx);
3230 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx);
3231 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrDRx);
3232 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterThreshold);
3233 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16PauseFilterCount);
3234 rc |= SSMR3GetU32(pSSM, &pVmcbNstGstCache->u32InterceptXcpt);
3235 rc |= SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64InterceptCtrl);
3236 rc |= SSMR3GetU64(pSSM, &pVmcbNstGstCache->u64TSCOffset);
3237 rc |= SSMR3GetBool(pSSM, &pVmcbNstGstCache->fVIntrMasking);
3238 rc |= SSMR3GetBool(pSSM, &pVmcbNstGstCache->fNestedPaging);
3239 rc |= SSMR3GetBool(pSSM, &pVmcbNstGstCache->fLbrVirt);
3240 AssertRCReturn(rc, rc);
3241 }
3242 }
3243 else
3244 {
3245 /* Pending HM event (obsolete for a long time since TRPM holds the info.) */
3246 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
3247 rc |= SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.u32ErrCode);
3248 rc |= SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.u64IntInfo);
3249
3250 /* VMX fWasInRealMode related data. */
3251 uint32_t uDummy;
3252 rc |= SSMR3GetU32(pSSM, &uDummy); AssertRCReturn(rc, rc);
3253 rc |= SSMR3GetU32(pSSM, &uDummy); AssertRCReturn(rc, rc);
3254 rc |= SSMR3GetU32(pSSM, &uDummy); AssertRCReturn(rc, rc);
3255 AssertRCReturn(rc, rc);
3256 }
3257 }
3258
3259 /*
3260 * Load TPR patching data.
3261 */
3262 if (uVersion >= HM_SAVED_STATE_VERSION_TPR_PATCHING)
3263 {
3264 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3265 rc |= SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3266 rc |= SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3267
3268 /* Fetch all TPR patch records. */
3269 rc |= SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3270 AssertRCReturn(rc, rc);
3271 for (uint32_t i = 0; i < pVM->hm.s.cPatches; i++)
3272 {
3273 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3274 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
3275 rc |= SSMR3GetMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
3276 rc |= SSMR3GetU32(pSSM, &pPatch->cbOp);
3277 rc |= SSMR3GetMem(pSSM, pPatch->aNewOpcode, sizeof(pPatch->aNewOpcode));
3278 rc |= SSMR3GetU32(pSSM, &pPatch->cbNewOp);
3279 rc |= SSMR3GetU32(pSSM, (uint32_t *)&pPatch->enmType);
3280
3281 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
3282 pVM->hm.s.fTPRPatchingActive = true;
3283 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
3284
3285 rc |= SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
3286 rc |= SSMR3GetU32(pSSM, &pPatch->uDstOperand);
3287 rc |= SSMR3GetU32(pSSM, &pPatch->cFaults);
3288 rc |= SSMR3GetU32(pSSM, &pPatch->pJumpTarget);
3289 AssertRCReturn(rc, rc);
3290
3291 LogFlow(("hmR3Load: patch %d\n", i));
3292 LogFlow(("Key = %x\n", pPatch->Core.Key));
3293 LogFlow(("cbOp = %d\n", pPatch->cbOp));
3294 LogFlow(("cbNewOp = %d\n", pPatch->cbNewOp));
3295 LogFlow(("type = %d\n", pPatch->enmType));
3296 LogFlow(("srcop = %d\n", pPatch->uSrcOperand));
3297 LogFlow(("dstop = %d\n", pPatch->uDstOperand));
3298 LogFlow(("cFaults = %d\n", pPatch->cFaults));
3299 LogFlow(("target = %x\n", pPatch->pJumpTarget));
3300
3301 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
3302 AssertRCReturn(rc, rc);
3303 }
3304 }
3305
3306 return VINF_SUCCESS;
3307}
3308
3309
3310/**
3311 * Displays HM info.
3312 *
3313 * @param pVM The cross context VM structure.
3314 * @param pHlp The info helper functions.
3315 * @param pszArgs Arguments, ignored.
3316 */
3317static DECLCALLBACK(void) hmR3Info(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3318{
3319 NOREF(pszArgs);
3320 PVMCPU pVCpu = VMMGetCpu(pVM);
3321 if (!pVCpu)
3322 pVCpu = &pVM->aCpus[0];
3323
3324 if (HMIsEnabled(pVM))
3325 {
3326 if (pVM->hm.s.vmx.fSupported)
3327 pHlp->pfnPrintf(pHlp, "CPU[%u]: VT-x info:\n", pVCpu->idCpu);
3328 else
3329 pHlp->pfnPrintf(pHlp, "CPU[%u]: AMD-V info:\n", pVCpu->idCpu);
3330 pHlp->pfnPrintf(pHlp, " HM error = %#x (%u)\n", pVCpu->hm.s.u32HMError, pVCpu->hm.s.u32HMError);
3331 pHlp->pfnPrintf(pHlp, " rcLastExitToR3 = %Rrc\n", pVCpu->hm.s.rcLastExitToR3);
3332 if (pVM->hm.s.vmx.fSupported)
3333 {
3334 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
3335 bool const fRealOnV86Active = pVmcsInfo->RealMode.fRealOnV86Active;
3336 bool const fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
3337
3338 pHlp->pfnPrintf(pHlp, " %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
3339 pHlp->pfnPrintf(pHlp, " Real-on-v86 active = %RTbool\n", fRealOnV86Active);
3340 if (fRealOnV86Active)
3341 {
3342 pHlp->pfnPrintf(pHlp, " EFlags = %#x\n", pVmcsInfo->RealMode.Eflags.u32);
3343 pHlp->pfnPrintf(pHlp, " Attr CS = %#x\n", pVmcsInfo->RealMode.AttrCS.u);
3344 pHlp->pfnPrintf(pHlp, " Attr SS = %#x\n", pVmcsInfo->RealMode.AttrSS.u);
3345 pHlp->pfnPrintf(pHlp, " Attr DS = %#x\n", pVmcsInfo->RealMode.AttrDS.u);
3346 pHlp->pfnPrintf(pHlp, " Attr ES = %#x\n", pVmcsInfo->RealMode.AttrES.u);
3347 pHlp->pfnPrintf(pHlp, " Attr FS = %#x\n", pVmcsInfo->RealMode.AttrFS.u);
3348 pHlp->pfnPrintf(pHlp, " Attr GS = %#x\n", pVmcsInfo->RealMode.AttrGS.u);
3349 }
3350 }
3351 }
3352 else
3353 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3354}
3355
3356
3357/**
3358 * Displays the HM pending event.
3359 *
3360 * @param pVM The cross context VM structure.
3361 * @param pHlp The info helper functions.
3362 * @param pszArgs Arguments, ignored.
3363 */
3364static DECLCALLBACK(void) hmR3InfoEventPending(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3365{
3366 NOREF(pszArgs);
3367 PVMCPU pVCpu = VMMGetCpu(pVM);
3368 if (!pVCpu)
3369 pVCpu = &pVM->aCpus[0];
3370
3371 if (HMIsEnabled(pVM))
3372 {
3373 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM event (fPending=%RTbool)\n", pVCpu->idCpu, pVCpu->hm.s.Event.fPending);
3374 if (pVCpu->hm.s.Event.fPending)
3375 {
3376 pHlp->pfnPrintf(pHlp, " u64IntInfo = %#RX64\n", pVCpu->hm.s.Event.u64IntInfo);
3377 pHlp->pfnPrintf(pHlp, " u32ErrCode = %#RX32\n", pVCpu->hm.s.Event.u32ErrCode);
3378 pHlp->pfnPrintf(pHlp, " cbInstr = %u bytes\n", pVCpu->hm.s.Event.cbInstr);
3379 pHlp->pfnPrintf(pHlp, " GCPtrFaultAddress = %#RGp\n", pVCpu->hm.s.Event.GCPtrFaultAddress);
3380 }
3381 }
3382 else
3383 pHlp->pfnPrintf(pHlp, "HM is not enabled for this VM!\n");
3384}
3385
3386
3387/**
3388 * Displays the SVM nested-guest VMCB cache.
3389 *
3390 * @param pVM The cross context VM structure.
3391 * @param pHlp The info helper functions.
3392 * @param pszArgs Arguments, ignored.
3393 */
3394static DECLCALLBACK(void) hmR3InfoSvmNstGstVmcbCache(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
3395{
3396 NOREF(pszArgs);
3397 PVMCPU pVCpu = VMMGetCpu(pVM);
3398 if (!pVCpu)
3399 pVCpu = &pVM->aCpus[0];
3400
3401 bool const fSvmEnabled = HMR3IsSvmEnabled(pVM->pUVM);
3402 if ( fSvmEnabled
3403 && pVM->cpum.ro.GuestFeatures.fSvm)
3404 {
3405 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
3406 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
3407 pHlp->pfnPrintf(pHlp, " fCacheValid = %RTbool\n", pVmcbNstGstCache->fCacheValid);
3408 pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx);
3409 pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx);
3410 pHlp->pfnPrintf(pHlp, " u16InterceptRdDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdDRx);
3411 pHlp->pfnPrintf(pHlp, " u16InterceptWrDRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrDRx);
3412 pHlp->pfnPrintf(pHlp, " u16PauseFilterThreshold = %#RX16\n", pVmcbNstGstCache->u16PauseFilterThreshold);
3413 pHlp->pfnPrintf(pHlp, " u16PauseFilterCount = %#RX16\n", pVmcbNstGstCache->u16PauseFilterCount);
3414 pHlp->pfnPrintf(pHlp, " u32InterceptXcpt = %#RX32\n", pVmcbNstGstCache->u32InterceptXcpt);
3415 pHlp->pfnPrintf(pHlp, " u64InterceptCtrl = %#RX64\n", pVmcbNstGstCache->u64InterceptCtrl);
3416 pHlp->pfnPrintf(pHlp, " u64TSCOffset = %#RX64\n", pVmcbNstGstCache->u64TSCOffset);
3417 pHlp->pfnPrintf(pHlp, " fVIntrMasking = %RTbool\n", pVmcbNstGstCache->fVIntrMasking);
3418 pHlp->pfnPrintf(pHlp, " fNestedPaging = %RTbool\n", pVmcbNstGstCache->fNestedPaging);
3419 pHlp->pfnPrintf(pHlp, " fLbrVirt = %RTbool\n", pVmcbNstGstCache->fLbrVirt);
3420 }
3421 else
3422 {
3423 if (!fSvmEnabled)
3424 pHlp->pfnPrintf(pHlp, "HM SVM is not enabled for this VM!\n");
3425 else
3426 pHlp->pfnPrintf(pHlp, "SVM feature is not exposed to the guest!\n");
3427 }
3428}
3429
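/*
 * Editor's note: a hedged sketch (not part of the build) of how info handlers
 * like the three above are typically hooked up; the actual registration done
 * in HMR3Init is not part of this excerpt, and the name/description strings
 * below are assumptions. Once registered, a handler can be invoked from the
 * debugger console, e.g. "info hmeventpending".
 */
#if 0
int rc = DBGFR3InfoRegisterInternal(pVM, "hmeventpending", "Dumps the pending HM event.",
                                    hmR3InfoEventPending);
AssertRCReturn(rc, rc);
#endif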