Changeset 107801 in vbox

- Timestamp: Jan 16, 2025, 12:23:19 AM (3 months ago)
- svn:sync-xref-src-repo-rev: 166943
- Location: trunk/src/VBox
- Files: 5 edited
Legend:

- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/Main/src-client/ConsoleImplConfigX86.cpp (r107267 → r107801)

@@ -991,4 +991,11 @@
     InsertConfigNode(pRoot, "NEM", &pNEM);
     InsertConfigInteger(pNEM, "Allow64BitGuests", fIsGuest64Bit);
+
+    InsertConfigInteger(pNEM, "IBPBOnVMExit", fIBPBOnVMExit);
+    InsertConfigInteger(pNEM, "IBPBOnVMEntry", fIBPBOnVMEntry);
+    InsertConfigInteger(pNEM, "L1DFlushOnSched", fL1DFlushOnSched);
+    InsertConfigInteger(pNEM, "L1DFlushOnVMEntry", fL1DFlushOnVMEntry);
+    InsertConfigInteger(pNEM, "MDSClearOnSched", fMDSClearOnSched);
+    InsertConfigInteger(pNEM, "MDSClearOnVMEntry", fMDSClearOnVMEntry);

     /*
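These six keys land under /NEM/ in the CFGM tree, mirroring the speculation-control keys the same function already pushes under /HM/. For context, here is a minimal sketch of how such a key is consumed, using the same CFGMR3QueryBoolDef() API that the NEMR3.cpp hunk below uses; the helper name is a hypothetical illustration, not part of the changeset:

    /* Illustrative sketch only; exampleQueryIbpbOnVmExit is a hypothetical
     * helper.  CFGMR3QueryBoolDef() reads the named boolean key from the given
     * CFGM node and falls back to the final argument when the key is absent,
     * which is exactly the pattern NEMR3.cpp uses below. */
    static int exampleQueryIbpbOnVmExit(PCFGMNODE pCfgNem, bool *pf)
    {
        return CFGMR3QueryBoolDef(pCfgNem, "IBPBOnVMExit", pf, false /* default */);
    }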
trunk/src/VBox/VMM/VMMR3/HM.cpp (r106061 → r107801)

@@ -758,5 +758,5 @@
 }

-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) /* This section is duplicated in nemR3InitFinalizeSpecCtrl. */
     /*
      * Check if L1D flush is needed/possible.
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp (r107227 → r107801)

@@ -46,4 +46,5 @@
 #include "NEMInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmcc.h>
 #include <VBox/vmm/uvm.h>
 #include <VBox/err.h>

@@ -106,4 +107,12 @@
                               "|VTimerInterrupt"
 #endif
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+                              "|IBPBOnVMExit"
+                              "|IBPBOnVMEntry"
+                              "|L1DFlushOnSched"
+                              "|L1DFlushOnVMEntry"
+                              "|MDSClearOnSched"
+                              "|MDSClearOnVMEntry"
+#endif
                               ,
                               "" /* pszValidNodes */, "NEM" /* pszWho */, 0 /* uInstance */);

@@ -139,4 +148,53 @@
         pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv = f;
     }
+
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+    /** @cfgm{/NEM/IBPBOnVMExit, bool}
+     * Costly paranoia setting. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "IBPBOnVMExit", &f, false);
+    AssertLogRelRCReturn(rc, rc);
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fIbpbOnVmExit = f);
+
+    /** @cfgm{/NEM/IBPBOnVMEntry, bool}
+     * Costly paranoia setting. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "IBPBOnVMEntry", &f, false);
+    AssertLogRelRCReturn(rc, rc);
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fIbpbOnVmEntry = f);
+
+    /** @cfgm{/NEM/L1DFlushOnSched, bool, true}
+     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "L1DFlushOnSched", &f, true);
+    AssertLogRelRCReturn(rc, rc);
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnSched = f);
+
+    /** @cfgm{/NEM/L1DFlushOnVMEntry, bool}
+     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "L1DFlushOnVMEntry", &f, false);
+    AssertLogRelRCReturn(rc, rc);
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnVmEntry = f);
+
+    /* Disable L1DFlushOnSched if L1DFlushOnVMEntry is enabled. */
+    PVMCPU const pVCpu0 = pVM->apCpusR3[0];
+    if (pVCpu0->nem.s.fL1dFlushOnVmEntry)
+        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnSched = false);
+
+    /** @cfgm{/NEM/MDSClearOnSched, bool, true}
+     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
+     * ignored on CPUs that aren't affected. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "MDSClearOnSched", &f, true);
+    AssertLogRelRCReturn(rc, rc);
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = f);
+
+    /** @cfgm{/NEM/MDSClearOnVmEntry, bool, false}
+     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
+     * ignored on CPUs that aren't affected. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "MDSClearOnVmEntry", &f, false);
+    AssertLogRelRCReturn(rc, rc);
+    VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnVmEntry = f);
+
+    /* Disable MDSClearOnSched if MDSClearOnVmEntry is enabled. */
+    if (pVCpu0->nem.s.fMdsClearOnVmEntry)
+        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = false);
+#endif /* VBOX_VMM_TARGET_X86 */

 #ifdef VBOX_VMM_TARGET_ARMV8

@@ -219,4 +277,50 @@


+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+/**
+ * Finalize configuration related to spectre and such.
+ *
+ * This can be called explicitly by the native code after it has seeded the
+ * necessary host information to CPUM, or/and it will be called by
+ * NEMR3InitAfterCPUM().
+ *
+ * @note This code is also duplicated in hmR3InitFinalizeR3().
+ */
+DECLHIDDEN(void) nemR3InitFinalizeSpecCtrl(PVM pVM)
+{
+    /*
+     * Check if L1D flush is needed/possible.
+     */
+    if (   !g_CpumHostFeatures.s.fFlushCmd
+        || g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
+        || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End
+        || g_CpumHostFeatures.s.fArchVmmNeedNotFlushL1d
+        || g_CpumHostFeatures.s.fArchRdclNo)
+        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fL1dFlushOnSched = pVCpu->nem.s.fL1dFlushOnVmEntry = false);
+
+    /*
+     * Check if MDS flush is needed/possible.
+     * On atoms and knight family CPUs, we will only allow clearing on scheduling.
+     */
+    if (   !g_CpumHostFeatures.s.fMdsClear
+        || g_CpumHostFeatures.s.fArchMdsNo)
+        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = pVCpu->nem.s.fMdsClearOnVmEntry = false);
+    else if (   (   g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
+                 && g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Atom_End)
+             || (   g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
+                 && g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Phi_End))
+    {
+        PVMCPU const pVCpu0 = pVM->apCpusR3[0];
+        if (!pVCpu0->nem.s.fMdsClearOnSched)
+            VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = pVCpu->nem.s.fMdsClearOnVmEntry);
+        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnVmEntry = false);
+    }
+    else if (   g_CpumHostFeatures.s.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
+             || g_CpumHostFeatures.s.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
+        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fMdsClearOnSched = pVCpu->nem.s.fMdsClearOnVmEntry = false);
+}
+#endif
+
+
 /**
  * Perform initialization that depends on CPUM working.

@@ -235,4 +339,7 @@
      * Do native after-CPUM init.
      */
+#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
+    nemR3InitFinalizeSpecCtrl(pVM);
+#endif
 #ifdef VBOX_WITH_NATIVE_NEM
     rc = nemR3NativeInitAfterCPUM(pVM);
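VMCC_FOR_EACH_VMCPU_STMT() comes from the newly included <VBox/vmm/vmcc.h> and runs one statement for every virtual CPU, which is how a single CFGM value fans out to the per-VCPU flags above. A conceptual ring-3 equivalent, as a sketch rather than the actual macro expansion:

    /* Sketch of what VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->nem.s.fIbpbOnVmExit = f)
     * does conceptually in ring-3; not the literal vmcc.h definition.  apCpusR3
     * is the real per-VCPU pointer array (the changeset itself uses
     * pVM->apCpusR3[0]). */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        pVCpu->nem.s.fIbpbOnVmExit = f;
    }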
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp (r107706 → r107801)

@@ -3676,6 +3676,14 @@
 {
     TMNotifyStartOfExecution(pVM, pVCpu);
-
     Assert(!pVCpu->nem.s.fCtxChanged);
+
+    if (!(pVCpu->nem.s.fMdsClearOnVmEntry | pVCpu->nem.s.fMdsClearOnVmEntry))
+    { /* likely*/ }
+    else
+    {
+        uint16_t u16 = ASMGetDS();
+        __asm__ __volatile__("verw %0" : "=m" (u16) : "0" (u16));
+    }
+
     hv_return_t hrc;
     if (hv_vcpu_run_until) /** @todo Configur the deadline dynamically based on when the next timer triggers. */
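The VERW sequence above relies on a documented side effect: on Intel CPUs that enumerate MD_CLEAR, executing VERW with any valid selector overwrites the microarchitectural buffers exploited by the MDS attacks; the segment-verification result itself is irrelevant. A standalone sketch of the same idiom without the IPRT ASMGetDS() helper (the function name is hypothetical, not part of the changeset):

    #include <stdint.h>

    /* Hypothetical helper, not part of this changeset.  On MD_CLEAR-capable
     * CPUs, VERW on a valid selector clears the MDS-affected CPU buffers as a
     * side effect; reading DS guarantees we pass a currently valid selector. */
    static inline void exampleMdsBufferClear(void)
    {
        uint16_t uSel;
        __asm__ __volatile__("movw %%ds, %0" : "=r" (uSel)); /* any valid selector */
        __asm__ __volatile__("verw %0" : "=m" (uSel) : "0" (uSel));
    }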
trunk/src/VBox/VMM/include/NEMInternal.h (r107308 → r107801)

@@ -424,4 +424,18 @@
     /** Whether \#DE needs to be intercepted for GIM. */
     bool                        fGCMTrapXcptDE : 1;
+#ifdef VBOX_VMM_TARGET_X86
+    /** Set if indirect branch prediction barrier on VM exit. */
+    bool                        fIbpbOnVmExit : 1;
+    /** Set if indirect branch prediction barrier on VM entry. */
+    bool                        fIbpbOnVmEntry : 1;
+    /** Set if level 1 data cache should be flushed on VM entry. */
+    bool                        fL1dFlushOnVmEntry : 1;
+    /** Set if level 1 data cache should be flushed on EMT scheduling. */
+    bool                        fL1dFlushOnSched : 1;
+    /** Set if MDS related buffers should be cleared on VM entry. */
+    bool                        fMdsClearOnVmEntry : 1;
+    /** Set if MDS related buffers should be cleared on EMT scheduling. */
+    bool                        fMdsClearOnSched : 1;
+#endif

 #if defined(RT_OS_LINUX)
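These one-bit fields are the NEM-side counterparts of the flags the HM code already maintains (see the duplication note in the HM.cpp hunk above), and the NEMR3Native-darwin.cpp hunk shows the VM-entry consumer. Testing one at runtime is a plain bitfield read, e.g. (sketch):

    /* Sketch of an entry-path consumer of the new per-VCPU bit, in the spirit
     * of the darwin hunk above.  exampleMdsBufferClear() is the hypothetical
     * helper from the earlier sketch. */
    if (pVCpu->nem.s.fMdsClearOnVmEntry)
        exampleMdsBufferClear();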