VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3.cpp@107924

Last change on this file since 107924 was 107801, checked in by vboxsync, 4 weeks ago

VMM/NEM,Main: Feed the cpu bug mitigation parameters to both HM and NEM. jiraref:VBP-947

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 24.1 KB
/* $Id: NEMR3.cpp 107801 2025-01-16 00:23:19Z vboxsync $ */
/** @file
 * NEM - Native execution manager.
 */

/*
 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

/** @page pg_nem NEM - Native Execution Manager.
 *
 * This is an alternative execution manager to HM and raw-mode. On one host
 * (Windows) we're forced to use this, on the others we just do it because we
 * can. Since this is host specific in nature, information about an
 * implementation is contained in the NEMR3Native-xxxx.cpp files.
 *
 * @ref pg_nem_win
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#include "NEMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/uvm.h>
#include <VBox/err.h>

#include <iprt/asm.h>
#include <iprt/string.h>



/**
 * Basic init and configuration reading.
 *
 * Always call NEMR3Term after calling this.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) NEMR3InitConfig(PVM pVM)
{
    LogFlow(("NEMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, nem.s, 64);
    AssertCompile(sizeof(pVM->nem.s) <= sizeof(pVM->nem.padding));

    /*
     * Initialize state info so NEMR3Term will always be happy.
     * No returning prior to setting magics!
     */
    pVM->nem.s.u32Magic = NEM_MAGIC;
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        pVCpu->nem.s.u32Magic = NEMCPU_MAGIC;
    }

    /*
     * Read configuration.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");

    /*
     * Validate the NEM settings.
     */
    int rc = CFGMR3ValidateConfig(pCfgNem,
                                  "/NEM/",
                                  "Enabled"
                                  "|Allow64BitGuests"
                                  "|LovelyMesaDrvWorkaround"
#ifdef RT_OS_WINDOWS
                                  "|UseRing0Runloop"
#elif defined(RT_OS_DARWIN)
                                  "|VmxPleGap"
                                  "|VmxPleWindow"
                                  "|VmxLbr"
#endif
#ifdef VBOX_VMM_TARGET_ARMV8
                                  "|VTimerInterrupt"
#endif
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                                  "|IBPBOnVMExit"
                                  "|IBPBOnVMEntry"
                                  "|L1DFlushOnSched"
                                  "|L1DFlushOnVMEntry"
                                  "|MDSClearOnSched"
                                  "|MDSClearOnVMEntry"
#endif
                                  ,
                                  "" /* pszValidNodes */, "NEM" /* pszWho */, 0 /* uInstance */);
    if (RT_FAILURE(rc))
        return rc;

    /** @cfgm{/NEM/Enabled, bool, true}
     * Whether NEM is enabled. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "Enabled", &pVM->nem.s.fEnabled, true);
    AssertLogRelRCReturn(rc, rc);


#ifdef VBOX_WITH_64_BITS_GUESTS
    /** @cfgm{/NEM/Allow64BitGuests, bool, 32-bit:false, 64-bit:true}
     * Enables AMD64 CPU features.
     * On 32-bit hosts this isn't the default and requires host CPU support;
     * 64-bit hosts already have the support. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "Allow64BitGuests", &pVM->nem.s.fAllow64BitGuests, HC_ARCH_BITS == 64);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->nem.s.fAllow64BitGuests = false;
#endif

    /** @cfgm{/NEM/LovelyMesaDrvWorkaround, bool, false}
     * Workaround for mesa vmsvga 3d driver making incorrect assumptions about
     * the hypervisor it is running under. */
    bool f;
    rc = CFGMR3QueryBoolDef(pCfgNem, "LovelyMesaDrvWorkaround", &f, false);
    AssertLogRelRCReturn(rc, rc);
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv = f;
    }

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    /** @cfgm{/NEM/IBPBOnVMExit, bool}
     * Costly paranoia setting. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "IBPBOnVMExit", &f, false);
    AssertLogRelRCReturn(rc, rc);
    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fIbpbOnVmExit = f);

    /** @cfgm{/NEM/IBPBOnVMEntry, bool}
     * Costly paranoia setting. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "IBPBOnVMEntry", &f, false);
    AssertLogRelRCReturn(rc, rc);
    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fIbpbOnVmEntry = f);

    /** @cfgm{/NEM/L1DFlushOnSched, bool, true}
     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "L1DFlushOnSched", &f, true);
    AssertLogRelRCReturn(rc, rc);
    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnSched = f);

    /** @cfgm{/NEM/L1DFlushOnVMEntry, bool}
     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "L1DFlushOnVMEntry", &f, false);
    AssertLogRelRCReturn(rc, rc);
    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnVmEntry = f);

    /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
    PVMCPU const pVCpu0 = pVM->apCpusR3[0];
    if (pVCpu0->nem.s.fL1dFlushOnVmEntry)
        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnSched = false);

    /** @cfgm{/NEM/MDSClearOnSched, bool, true}
     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
     * ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "MDSClearOnSched", &f, true);
    AssertLogRelRCReturn(rc, rc);
    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = f);

    /** @cfgm{/NEM/MDSClearOnVMEntry, bool, false}
     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
     * ignored on CPUs that aren't affected. */
    rc = CFGMR3QueryBoolDef(pCfgNem, "MDSClearOnVMEntry", &f, false);
    AssertLogRelRCReturn(rc, rc);
    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnVmEntry = f);

    /* Disable MDSClearOnSched if MDSClearOnVMEntry is enabled. */
    if (pVCpu0->nem.s.fMdsClearOnVmEntry)
        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = false);
#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */

#ifdef VBOX_VMM_TARGET_ARMV8
    /** @cfgm{/NEM/VTimerInterrupt, uint32_t}
     * Specifies the interrupt identifier for the VTimer. */
    rc = CFGMR3QueryU32(pCfgNem, "VTimerInterrupt", &pVM->nem.s.u32GicPpiVTimer);
    AssertLogRelRCReturn(rc, rc);
#endif

    return VINF_SUCCESS;
}
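
/*
 * Editor's note (not part of the original file): a minimal sketch of how a
 * frontend might seed the /NEM/ keys validated by NEMR3InitConfig() above,
 * assuming the CFGM tree is still being built at that point. CFGMR3InsertNode
 * and CFGMR3InsertInteger are the same CFGM APIs already used elsewhere in
 * this file; the helper name and key choices below are hypothetical.
 */
#if 0
static int nemR3ExampleSeedConfig(PVM pVM)
{
    /* Create the NEM node under the CFGM root (fails if it already exists). */
    PCFGMNODE pNemCfg = NULL;
    int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "NEM", &pNemCfg);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pNemCfg, "Enabled", 1);            /* CFGM stores booleans as integers. */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pNemCfg, "L1DFlushOnSched", 1);    /* One of the mitigation knobs read above. */
    return rc;
}
#endif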


/**
 * This is called by HMR3Init() when HM cannot be used.
 *
 * Sets VM::bMainExecutionEngine to VM_EXEC_ENGINE_NATIVE_API if we can use a
 * native hypervisor API to execute the VM.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether this is a fallback call.  Cleared if the VM is
 *                      configured to use NEM instead of HM.
 * @param   fForced     Whether /HM/HMForced was set.  If set and we fail to
 *                      enable NEM, we'll return a failure status code.
 *                      Otherwise we'll assume HMR3Init falls back on raw-mode.
 */
VMMR3_INT_DECL(int) NEMR3Init(PVM pVM, bool fFallback, bool fForced)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API);
    int rc;
    if (pVM->nem.s.fEnabled)
    {
#ifdef VBOX_WITH_NATIVE_NEM
        rc = nemR3NativeInit(pVM, fFallback, fForced);
        ASMCompilerBarrier(); /* May have changed bMainExecutionEngine. */
#else
        RT_NOREF(fFallback);
        rc = VINF_SUCCESS;
#endif
        if (RT_SUCCESS(rc))
        {
            if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
            {
#ifndef VBOX_WITH_HWVIRT /* Don't complain if there are no other alternatives. */
# ifdef RT_OS_WINDOWS   /* The WHv* API is extremely slow at handling VM exits. The AppleHv and
                           KVM APIs are much faster, thus the different mode name. :-) */
                LogRel(("NEM:\n"
                        "NEM: NEMR3Init: Snail execution mode is active!\n"
                        "NEM: Note! VirtualBox is not able to run at its full potential in this execution mode.\n"
                        "NEM: To see VirtualBox run at max speed you need to disable all Windows features\n"
                        "NEM: making use of Hyper-V. That is a moving target, so google how and carefully\n"
                        "NEM: consider the consequences of disabling these features.\n"
                        "NEM:\n"));
# else
                LogRel(("NEM:\n"
                        "NEM: NEMR3Init: Turtle execution mode is active!\n"
                        "NEM: Note! VirtualBox is not able to run at its full potential in this execution mode.\n"
                        "NEM:\n"));
# endif
#endif
            }
            else
            {
                LogRel(("NEM: NEMR3Init: Not available.\n"));
                if (fForced)
                    rc = VERR_NEM_NOT_AVAILABLE;
            }
        }
        else
            LogRel(("NEM: NEMR3Init: Native init failed: %Rrc.\n", rc));
    }
    else
    {
        LogRel(("NEM: NEMR3Init: Disabled.\n"));
        rc = fForced ? VERR_NEM_NOT_ENABLED : VINF_SUCCESS;
    }
    return rc;
}
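
/*
 * Editor's note (not part of the original file): rough lifecycle of the entry
 * points in this file, as far as its own doc comments describe it --
 *
 *      NEMR3InitConfig()       - read/validate the /NEM/ CFGM keys (always pair with NEMR3Term()).
 *      NEMR3Init()             - invoked from HMR3Init() when HM cannot be used; may select
 *                                VM_EXEC_ENGINE_NATIVE_API as the main execution engine.
 *      NEMR3InitAfterCPUM()    - native init that depends on CPUM being up.
 *      NEMR3InitCompleted()    - per init-phase notifications (e.g. VMINITCOMPLETED_RING3).
 *      NEMR3Term()             - native termination and magic invalidation.
 *
 * The exact ordering relative to the rest of VMM startup is an assumption
 * drawn from these doc comments, not from the VM creation code itself.
 */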


#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
/**
 * Finalize configuration related to spectre and such.
 *
 * This can be called explicitly by the native code after it has seeded the
 * necessary host information to CPUM, and/or it will be called by
 * NEMR3InitAfterCPUM().
 *
 * @note This code is also duplicated in hmR3InitFinalizeR3().
 */
DECLHIDDEN(void) nemR3InitFinalizeSpecCtrl(PVM pVM)
{
    /*
     * Check if L1D flush is needed/possible.
     */
    if (   !g_CpumHostFeatures.s.fFlushCmd
        || g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
        || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
        || g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d
        || g_CpumHostFeatures.s.fArchRdclNo)
        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnSched = pVCpu->nem.s.fL1dFlushOnVmEntry = false);

    /*
     * Check if MDS flush is needed/possible.
     * On Atom and Knights family CPUs, we will only allow clearing on scheduling.
     */
    if (   !g_CpumHostFeatures.s.fMdsClear
        || g_CpumHostFeatures.s.fArchMdsNo)
        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = pVCpu->nem.s.fMdsClearOnVmEntry = false);
    else if (   (   g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
                 && g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Atom_End)
             || (   g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
                 && g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Phi_End))
    {
        PVMCPU const pVCpu0 = pVM->apCpusR3[0];
        if (!pVCpu0->nem.s.fMdsClearOnSched)
            VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = pVCpu->nem.s.fMdsClearOnVmEntry);
        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnVmEntry = false);
    }
    else if (   g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
             || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = pVCpu->nem.s.fMdsClearOnVmEntry = false);
}
#endif
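
/*
 * Editor's note (not part of the original file): how the mitigation flags
 * combine, as implemented by NEMR3InitConfig() and nemR3InitFinalizeSpecCtrl()
 * above:
 *
 *      - An on-VM-entry flush/clear supersedes the on-schedule variant, so the
 *        corresponding fL1dFlushOnSched / fMdsClearOnSched flag is cleared.
 *      - Both variants are cleared when the host CPU reports it is unaffected
 *        (fArchRdclNo, fArchMdsNo) or lacks the flush/clear primitives.
 *      - On Atom and Knights family hosts only on-schedule MDS clearing is
 *        retained, per the comment in the function above.
 */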


/**
 * Perform initialization that depends on CPUM working.
 *
 * This is a noop if NEM wasn't activated by a previous NEMR3Init() call.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) NEMR3InitAfterCPUM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
    {
        /*
         * Do native after-CPUM init.
         */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        nemR3InitFinalizeSpecCtrl(pVM);
#endif
#ifdef VBOX_WITH_NATIVE_NEM
        rc = nemR3NativeInitAfterCPUM(pVM);
#else
        RT_NOREF(pVM);
#endif
    }
    return rc;
}


/**
 * Called when an init phase has completed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     The phase that completed.
 */
VMMR3_INT_DECL(int) NEMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    /*
     * Check if GIM needs #UD, since that applies to everyone.
     */
    if (enmWhat == VMINITCOMPLETED_RING3)
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = pVM->apCpusR3[idCpu];
            pVCpu->nem.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu);
        }

    /*
     * Call native code.
     */
    int rc = VINF_SUCCESS;
#ifdef VBOX_WITH_NATIVE_NEM
    if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
        rc = nemR3NativeInitCompleted(pVM, enmWhat);
#else
    RT_NOREF(pVM, enmWhat);
#endif
    return rc;
}


/**
 * Terminates the NEM part of the VM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) NEMR3Term(PVM pVM)
{
    AssertReturn(pVM->nem.s.u32Magic == NEM_MAGIC, VERR_WRONG_ORDER);
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        AssertReturn(pVM->apCpusR3[idCpu]->nem.s.u32Magic == NEMCPU_MAGIC, VERR_WRONG_ORDER);

    /* Do native termination. */
    int rc = VINF_SUCCESS;
#ifdef VBOX_WITH_NATIVE_NEM
    if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
        rc = nemR3NativeTerm(pVM);
#endif

    /* Mark it as terminated. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        pVCpu->nem.s.u32Magic = NEMCPU_MAGIC_DEAD;
    }
    pVM->nem.s.u32Magic = NEM_MAGIC_DEAD;
    return rc;
}

/**
 * External interface for querying whether native execution API is used.
 *
 * @returns true if NEM is being used, otherwise false.
 * @param   pUVM    The user mode VM handle.
 * @sa      HMR3IsEnabled
 */
VMMR3DECL(bool) NEMR3IsEnabled(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return VM_IS_NEM_ENABLED(pVM);
}


/**
 * The VM is being reset.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) NEMR3Reset(PVM pVM)
{
#ifdef VBOX_WITH_NATIVE_NEM
    if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
        nemR3NativeReset(pVM);
#else
    RT_NOREF(pVM);
#endif
}


/**
 * Resets a virtual CPU.
 *
 * Used to bring up secondary CPUs on SMP as well as CPU hot plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure to reset.
 * @param   fInitIpi    Set if being reset due to INIT IPI.
 */
VMMR3_INT_DECL(void) NEMR3ResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
#ifdef VBOX_WITH_NATIVE_NEM
    if (pVCpu->pVMR3->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API)
        nemR3NativeResetCpu(pVCpu, fInitIpi);
#else
    RT_NOREF(pVCpu, fInitIpi);
#endif
}


/**
 * Indicates to TM that TMTSCMODE_NATIVE_API should be used for TSC.
 *
 * @returns true if TMTSCMODE_NATIVE_API must be used, otherwise @c false.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(bool) NEMR3NeedSpecialTscMode(PVM pVM)
{
#ifdef VBOX_WITH_NATIVE_NEM
    if (VM_IS_NEM_ENABLED(pVM))
        return true;
#else
    RT_NOREF(pVM);
#endif
    return false;
}


/**
 * Gets the name of a generic NEM exit code.
 *
 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
 * @param   uExit   The NEM exit to name.
 */
VMMR3DECL(const char *) NEMR3GetExitName(uint32_t uExit)
{
    switch ((NEMEXITTYPE)uExit)
    {
        case NEMEXITTYPE_INTTERRUPT_WINDOW:             return "NEM interrupt window";
        case NEMEXITTYPE_HALT:                          return "NEM halt";

        case NEMEXITTYPE_UNRECOVERABLE_EXCEPTION:       return "NEM unrecoverable exception";
        case NEMEXITTYPE_INVALID_VP_REGISTER_VALUE:     return "NEM invalid vp register value";
        case NEMEXITTYPE_XCPT_UD:                       return "NEM #UD";
        case NEMEXITTYPE_XCPT_DB:                       return "NEM #DB";
        case NEMEXITTYPE_XCPT_BP:                       return "NEM #BP";
        case NEMEXITTYPE_CANCELED:                      return "NEM canceled";
        case NEMEXITTYPE_MEMORY_ACCESS:                 return "NEM memory access";

        case NEMEXITTYPE_INTERNAL_ERROR_EMULATION:      return "NEM emulation IPE";
        case NEMEXITTYPE_INTERNAL_ERROR_FATAL:          return "NEM fatal IPE";
        case NEMEXITTYPE_INTERRUPTED:                   return "NEM interrupted";
        case NEMEXITTYPE_FAILED_ENTRY:                  return "NEM failed VT-x/AMD-V entry";

        case NEMEXITTYPE_INVALID:
        case NEMEXITTYPE_END:
            break;
    }

    return NULL;
}
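
/*
 * Editor's note (not part of the original file): minimal usage sketch for
 * NEMR3GetExitName() above, e.g. when logging an exit reason coming out of the
 * native run loop. The helper name, the uExit value and the calling context
 * are assumptions for illustration only.
 */
#if 0
static void nemR3ExampleLogExitName(uint32_t uExit)
{
    const char * const pszExit = NEMR3GetExitName(uExit);   /* NULL for unknown exits. */
    LogRel(("NEM: Last exit: %s\n", pszExit ? pszExit : "<unknown>"));
}
#endif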


VMMR3_INT_DECL(VBOXSTRICTRC) NEMR3RunGC(PVM pVM, PVMCPU pVCpu)
{
    Assert(VM_IS_NEM_ENABLED(pVM));
#ifdef VBOX_WITH_NATIVE_NEM
    return nemR3NativeRunGC(pVM, pVCpu);
#else
    NOREF(pVM); NOREF(pVCpu);
    return VERR_INTERNAL_ERROR_3;
#endif
}


#ifndef VBOX_WITH_NATIVE_NEM
VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return false;
}
#endif


VMMR3_INT_DECL(bool) NEMR3SetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    Assert(VM_IS_NEM_ENABLED(pVM));
#ifdef VBOX_WITH_NATIVE_NEM
    return nemR3NativeSetSingleInstruction(pVM, pVCpu, fEnable);
#else
    NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
    return false;
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));
#ifdef VBOX_WITH_NATIVE_NEM
    nemR3NativeNotifyFF(pVM, pVCpu, fFlags);
#else
    RT_NOREF(pVM, pVCpu, fFlags);
#endif
}

#ifndef VBOX_WITH_NATIVE_NEM

VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    RT_NOREF(pVCpu, fEnabled);
}

# ifdef VBOX_WITH_PGM_NEM_MODE

VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return false;
}


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
    AssertFailed();
    return VERR_INTERNAL_ERROR_2;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange);
    AssertFailed();
    return VERR_INTERNAL_ERROR_2;
}

# endif /* VBOX_WITH_PGM_NEM_MODE */
#endif /* !VBOX_WITH_NATIVE_NEM */

/**
 * Notification callback from DBGF when interrupt breakpoints or generic debug
 * event settings change.
 *
 * DBGF will call NEMR3NotifyDebugEventChangedPerCpu on each CPU afterwards;
 * this function only updates the VM globals.
 *
 * @param   pVM     The cross context VM structure.
 * @thread  EMT(0)
 */
VMMR3_INT_DECL(void) NEMR3NotifyDebugEventChanged(PVM pVM)
{
    AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));

#ifdef VBOX_WITH_NATIVE_NEM
    /* Interrupts. */
    bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
                      || pVM->dbgf.ro.cHardIntBreakpoints > 0;

    /* CPU Exceptions. */
    for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
         !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
         enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
        fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);

    /* Common VM exits. */
    for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
         !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
         enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
        fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);

    /* Done. */
    pVM->nem.s.fUseDebugLoop = nemR3NativeNotifyDebugEventChanged(pVM, fUseDebugLoop);
#else
    RT_NOREF(pVM);
#endif
}


/**
 * Follow up notification callback to NEMR3NotifyDebugEventChanged for each CPU.
 *
 * NEM uses this to combine the decision made by NEMR3NotifyDebugEventChanged
 * with per-CPU settings.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMR3_INT_DECL(void) NEMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
{
    AssertLogRelReturnVoid(VM_IS_NEM_ENABLED(pVM));

#ifdef VBOX_WITH_NATIVE_NEM
    pVCpu->nem.s.fUseDebugLoop = nemR3NativeNotifyDebugEventChangedPerCpu(pVM, pVCpu,
                                                                          pVCpu->nem.s.fSingleInstruction | pVM->nem.s.fUseDebugLoop);
#else
    RT_NOREF(pVM, pVCpu);
#endif
}


/**
 * Disables a CPU ISA extension, like MONITOR/MWAIT.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pszIsaExt   The ISA extension name in the config tree.
 */
int nemR3DisableCpuIsaExt(PVM pVM, const char *pszIsaExt)
{
    /*
     * Get IsaExts config node under CPUM.
     */
    PCFGMNODE pIsaExts = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/CPUM/IsaExts");
    if (!pIsaExts)
    {
        int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "/CPUM/IsaExts", &pIsaExts);
        AssertLogRelMsgReturn(RT_SUCCESS(rc), ("CFGMR3InsertNode: rc=%Rrc pszIsaExt=%s\n", rc, pszIsaExt), rc);
    }

    /*
     * Look for a value by the given name (pszIsaExt).
     */
    /* Integer values 1 (CPUMISAEXTCFG_ENABLED_SUPPORTED) and 9 (CPUMISAEXTCFG_ENABLED_PORTABLE) will be replaced. */
    uint64_t u64Value;
    int rc = CFGMR3QueryInteger(pIsaExts, pszIsaExt, &u64Value);
    if (RT_SUCCESS(rc))
    {
        if (u64Value != 1 && u64Value != 9)
        {
            LogRel(("NEM: Not disabling IsaExt '%s', already configured with int value %lld\n", pszIsaExt, u64Value));
            return VINF_SUCCESS;
        }
        CFGMR3RemoveValue(pIsaExts, pszIsaExt);
    }
    /* String values 'default', 'enabled' and 'portable' will be replaced. */
    else if (rc == VERR_CFGM_NOT_INTEGER)
    {
        char szValue[32];
        rc = CFGMR3QueryString(pIsaExts, pszIsaExt, szValue, sizeof(szValue));
        AssertRCReturn(rc, VINF_SUCCESS);

        if (   RTStrICmpAscii(szValue, "default") != 0
            && RTStrICmpAscii(szValue, "def") != 0
            && RTStrICmpAscii(szValue, "enabled") != 0
            && RTStrICmpAscii(szValue, "enable") != 0
            && RTStrICmpAscii(szValue, "on") != 0
            && RTStrICmpAscii(szValue, "yes") != 0
            && RTStrICmpAscii(szValue, "portable") != 0)
        {
            LogRel(("NEM: Not disabling IsaExt '%s', already configured with string value '%s'\n", pszIsaExt, szValue));
            return VINF_SUCCESS;
        }
        CFGMR3RemoveValue(pIsaExts, pszIsaExt);
    }
    else
        AssertLogRelMsgReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, ("CFGMR3QueryInteger: rc=%Rrc pszIsaExt=%s\n", rc, pszIsaExt),
                              VERR_NEM_IPE_8);

    /*
     * Insert the disabling value.
     */
    rc = CFGMR3InsertInteger(pIsaExts, pszIsaExt, 0 /* disabled */);
    AssertLogRelMsgReturn(RT_SUCCESS(rc), ("CFGMR3InsertInteger: rc=%Rrc pszIsaExt=%s\n", rc, pszIsaExt), rc);

    return VINF_SUCCESS;
}

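/*
 * Editor's note (not part of the original file): usage sketch for
 * nemR3DisableCpuIsaExt() above. A native backend that cannot virtualize an
 * ISA extension would call it before CPUM builds the guest CPUID, e.g.:
 *
 *      int rc = nemR3DisableCpuIsaExt(pVM, "MONITOR");
 *
 * The "MONITOR" key and the calling site are assumptions based on the
 * MONITOR/MWAIT example in the function's own doc comment, not on the
 * native NEM backends themselves.
 */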