Changeset 69584 in vbox
- Timestamp: Nov 4, 2017 10:31:10 PM
- svn:sync-xref-src-repo-rev: 118934
- Location: trunk/src/VBox/VMM/tools
- Files: 4 edited
Legend: in the reconstructed diffs below, lines added in r69584 are prefixed with "+", removed lines with "-", unmodified context is shown without a prefix, and "…" marks elided context.
trunk/src/VBox/VMM/tools/MsrLinux.cpp (r69582 → r69584)

 }

-int PlatformMsrProberInit(VBMSRFNS *fnsMsr)
+int PlatformMsrProberInit(VBMSRFNS *fnsMsr, bool *pfAtomicMsrMod)
 {
     if (access(MSR_DEV_NAME, F_OK))
 …
     fnsMsr->msrModify     = linuxMsrProberModify;
     fnsMsr->msrProberTerm = linuxMsrProberTerm;
+    *pfAtomicMsrMod = false; /* Can't modify/restore MSRs without trip to R3. */

     return VINF_SUCCESS;
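Why the Linux prober reports *pfAtomicMsrMod = false: it goes through the msr character device, so the read, the modify and the restore are separate ring-3 syscalls with arbitrary other code running in between. A minimal standalone sketch of that access pattern (illustration only, not the VBox code; the device path and MSR number are caller-supplied placeholders):

/* Sketch only: shows why a /dev/cpu/<N>/msr based modify/restore cannot be
 * atomic -- the kernel and other threads run between each of these calls. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int sketchMsrModify(const char *pszDev, uint32_t idMsr,
                           uint64_t fAndMask, uint64_t fOrMask)
{
    int fd = open(pszDev, O_RDWR);                              /* e.g. "/dev/cpu/0/msr" */
    if (fd < 0)
        return -1;

    uint64_t uOld;
    if (pread(fd, &uOld, sizeof(uOld), idMsr) != sizeof(uOld))  /* syscall #1: read */
        return close(fd), -1;

    uint64_t uNew = (uOld & fAndMask) | fOrMask;
    if (pwrite(fd, &uNew, sizeof(uNew), idMsr) != sizeof(uNew)) /* syscall #2: modify */
        return close(fd), -1;

    /* Anything -- including syscall entry code that relies on the MSR -- may
     * execute before this restore; that is what g_fAtomicMsrMod guards against. */
    pwrite(fd, &uOld, sizeof(uOld), idMsr);                     /* syscall #3: restore */
    return close(fd);
}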
trunk/src/VBox/VMM/tools/MsrSup.cpp (r69583 → r69584)

 }

-int SupDrvMsrProberInit(VBMSRFNS *fnsMsr)
+int SupDrvMsrProberInit(VBMSRFNS *fnsMsr, bool *pfAtomicMsrMod)
 {
     int rc = SUPR3Init(NULL);
 …
     fnsMsr->msrModify     = supMsrProberModify;
     fnsMsr->msrProberTerm = supMsrProberTerm;
+    *pfAtomicMsrMod = true;

     return VINF_SUCCESS;
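The support-driver path can set *pfAtomicMsrMod = true because the actual MSR work is done in ring 0. A hypothetical sketch of what "more or less atomically" means here, built on IPRT assembly wrappers (ASMIntDisableFlags, ASMRdMsr, ASMWrMsr, ASMSetFlags); this illustrates the idea and is not the SUPDrv implementation:

/* Hypothetical ring-0 sketch of an "atomic" MSR probe: with interrupts masked
 * on this CPU, nothing else executes between reading, modifying and restoring
 * the MSR.  Illustration only. */
#include <iprt/types.h>
#include <iprt/asm-amd64-x86.h>

static uint64_t sketchAtomicMsrProbe(uint32_t idMsr, uint64_t fAndMask, uint64_t fOrMask)
{
    RTCCUINTREG const fSavedFlags = ASMIntDisableFlags(); /* no interrupts, no preemption */
    uint64_t const    uOld        = ASMRdMsr(idMsr);
    ASMWrMsr(idMsr, (uOld & fAndMask) | fOrMask);
    uint64_t const    uProbed     = ASMRdMsr(idMsr);      /* observe what actually stuck */
    ASMWrMsr(idMsr, uOld);                                /* restore before anyone notices */
    ASMSetFlags(fSavedFlags);
    return uProbed;
}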
trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp (r69582 → r69584)

 /** MSR prober routines. */
 static VBMSRFNS g_MsrAcc;
+/** Whether the MSR prober can read/modify/restore MSRs more or less
+ * atomically, without allowing other code to be executed. */
+static bool     g_fAtomicMsrMod;

 void vbCpuRepDebug(const char *pszMsg, ...)
 …
             break;

+        /* KVM MSRs that are unsafe to touch. */
+        case 0x00000011: /* KVM */
+        case 0x00000012: /* KVM */
+            return VBCPUREPBADNESS_BOND_VILLAIN;
+
+        /*
+         * The TSC is tricky -- writing it isn't a problem, but if we put back the
+         * original value, we'll throw it out of whack.  If we're on an SMP OS that
+         * uses the TSC for timing, we'll likely kill it, especially if we can't do
+         * the modification very quickly.
+         */
+        case 0x00000010: /* IA32_TIME_STAMP_COUNTER */
+            if (!g_fAtomicMsrMod)
+                return VBCPUREPBADNESS_BOND_VILLAIN;
+            break;
+
+        /*
+         * The following MSRs are not safe to modify in a typical OS if we can't do it
+         * atomically, i.e. read/modify/restore without allowing any other code to
+         * execute.  Everything related to syscalls will blow up in our face if we go
+         * back to userland with modified MSRs.
+         */
+//      case 0x0000001b: /* IA32_APIC_BASE */
+        case 0xc0000081: /* MSR_K6_STAR */
+        case 0xc0000082: /* AMD64_STAR64 */
+        case 0xc0000083: /* AMD64_STARCOMPAT */
+        case 0xc0000084: /* AMD64_SYSCALL_FLAG_MASK */
+        case 0xc0000100: /* AMD64_FS_BASE */
+        case 0xc0000101: /* AMD64_GS_BASE */
+        case 0xc0000102: /* AMD64_KERNEL_GS_BASE */
+            if (!g_fAtomicMsrMod)
+                return VBCPUREPBADNESS_MIGHT_BITE;
+            break;
+
         case 0x000001a0: /* IA32_MISC_ENABLE */
         case 0x00000199: /* IA32_PERF_CTL */
             return VBCPUREPBADNESS_MIGHT_BITE;
+
+        case 0x000005a0: /* C2_PECI_CTL */
+        case 0x000005a1: /* C2_UNK_0000_05a1 */
+            if (g_enmVendor == CPUMCPUVENDOR_INTEL)
+                return VBCPUREPBADNESS_MIGHT_BITE;
+            break;
+
         case 0x00002000: /* P6_CR0. */
         case 0x00002003: /* P6_CR3. */
 …
     if (g_enmMicroarch == kCpumMicroarch_Intel_P6_III)
         fSkipMask |= RT_BIT(9);
+
+    /* If the OS uses the APIC, we have to be super careful. */
+    if (!g_fAtomicMsrMod)
+        fSkipMask |= 0x0000000ffffff000;
+
     return reportMsr_GenFunctionEx(uMsr, "Ia32ApicBase", uValue, fSkipMask, 0, NULL);
 }
 …
         RTThreadSleep(128);
     }
+
+    /* If the OS is using MONITOR/MWAIT we'd better not disable it! */
+    if (!g_fAtomicMsrMod)
+        fSkipMask |= RT_BIT(18);

     /* The no execute related flag is deadly if clear. */
 …
     uint64_t fSkipMask = 0;
     if (vbCpuRepSupportsLongMode())
+    {
         fSkipMask |= MSR_K6_EFER_LME;
+        if (!g_fAtomicMsrMod && (uValue & MSR_K6_EFER_SCE))
+            fSkipMask |= MSR_K6_EFER_SCE;
+    }
     if (   (uValue & MSR_K6_EFER_NXE)
         || vbCpuRepSupportsNX())
 …
      * First try the support library (also checks if we can really read MSRs).
      */
-    int rc = SupDrvMsrProberInit(&g_MsrAcc);
+    int rc = SupDrvMsrProberInit(&g_MsrAcc, &g_fAtomicMsrMod);
     if (RT_FAILURE(rc))
     {
 #ifdef VBCR_HAVE_PLATFORM_MSR_PROBER
         /* Next try a platform-specific interface. */
-        rc = PlatformMsrProberInit(&g_MsrAcc);
+        rc = PlatformMsrProberInit(&g_MsrAcc, &g_fAtomicMsrMod);
 #endif
         if (RT_FAILURE(rc))
 …
     uint32_t cMsrs;
     rc = findMsrs(&paMsrs, &cMsrs, fMsrMask);
-    if (!RT_FAILURE(rc))
+    if (RT_FAILURE(rc))
         return rc;
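A note on the 0x0000000ffffff000 added to the Ia32ApicBase skip mask (my interpretation, not spelled out in the changeset): bits 12..35 of IA32_APIC_BASE hold the APIC base physical address, so skipping them keeps a non-atomic prober from relocating the APIC MMIO page under a live OS. A tiny standalone check of the bit span covered by that constant (uses GCC/Clang builtins):

/* Verifies that the new IA32_APIC_BASE skip mask covers bits 12..35, i.e. the
 * APIC base address field for a 36-bit physical address space.  Standalone
 * illustration, not part of VBoxCpuReport. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t const fSkipMask = UINT64_C(0x0000000ffffff000);
    unsigned const iLowBit   = (unsigned)__builtin_ctzll(fSkipMask);
    unsigned const iHighBit  = 63u - (unsigned)__builtin_clzll(fSkipMask);

    assert(iLowBit == 12 && iHighBit == 35);
    printf("IA32_APIC_BASE skip mask covers bits %u..%u (APIC base address field)\n",
           iLowBit, iHighBit);
    return 0;
}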
trunk/src/VBox/VMM/tools/VBoxCpuReport.h (r69582 → r69584)

 extern void vbCpuRepDebug(const char *pszMsg, ...);
 extern void vbCpuRepPrintf(const char *pszMsg, ...);
-extern int SupDrvMsrProberInit(VBMSRFNS *fnsMsr);
-extern int PlatformMsrProberInit(VBMSRFNS *fnsMsr);
+extern int SupDrvMsrProberInit(VBMSRFNS *fnsMsr, bool *pfAtomicMsrMod);
+extern int PlatformMsrProberInit(VBMSRFNS *fnsMsr, bool *pfAtomicMsrMod);