Changeset 93722 in vbox for trunk/src/VBox

Timestamp: Feb 14, 2022, 12:52:49 PM
Location:  trunk/src/VBox/VMM
Files:     3 edited
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp
(r93351 → r93722)

@@ -86,4 +86,8 @@
 #ifdef RT_OS_WINDOWS
                               "|UseRing0Runloop"
+#elif defined(RT_OS_DARWIN)
+                              "|VmxPleGap"
+                              "|VmxPleWindow"
+                              "|VmxLbr"
 #endif
                               ,
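These three keys land in the /NEM branch of the VM's CFGM tree (see the CFGMR3QueryU32Def/CFGMR3QueryBoolDef calls in NEMR3Native-darwin.cpp below). A hedged usage sketch, relying on the documented VBoxInternal/ extradata-to-CFGM mapping; "MyVM" and the tick counts are placeholders, not recommended values:

    VBoxManage setextradata "MyVM" "VBoxInternal/NEM/VmxLbr"       1
    VBoxManage setextradata "MyVM" "VBoxInternal/NEM/VmxPleGap"    128
    VBoxManage setextradata "MyVM" "VBoxInternal/NEM/VmxPleWindow" 4096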
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
(r93681 → r93722)

@@ -217,4 +217,14 @@
 
 
+/** MSR permission flags type. */
+typedef uint32_t hv_msr_flags_t;
+/** MSR can't be accessed. */
+#define HV_MSR_NONE     0
+/** MSR is readable by the guest. */
+#define HV_MSR_READ     RT_BIT(0)
+/** MSR is writeable by the guest. */
+#define HV_MSR_WRITE    RT_BIT(1)
+
+
 typedef hv_return_t FN_HV_CAPABILITY(hv_capability_t capability, uint64_t *value);
 typedef hv_return_t FN_HV_VM_CREATE(hv_vm_options_t flags);

@@ -259,4 +269,6 @@
 /* Since 11.0 */
 typedef hv_return_t FN_HV_VMX_VCPU_GET_CAP_WRITE_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *allowed_0, uint64_t *allowed_1);
+typedef hv_return_t FN_HV_VCPU_ENABLE_MANAGED_MSR(hv_vcpuid_t vcpu, uint32_t msr, bool enable);
+typedef hv_return_t FN_HV_VCPU_SET_MSR_ACCESS(hv_vcpuid_t vcpu, uint32_t msr, hv_msr_flags_t flags);
 
 

@@ -311,4 +323,6 @@
 
 static FN_HV_VMX_VCPU_GET_CAP_WRITE_VMCS *g_pfnHvVmxVCpuGetCapWriteVmcs = NULL; /* Since 11.0 */
+static FN_HV_VCPU_ENABLE_MANAGED_MSR     *g_pfnHvVCpuEnableManagedMsr   = NULL; /* Since 11.0 */
+static FN_HV_VCPU_SET_MSR_ACCESS         *g_pfnHvVCpuSetMsrAccess       = NULL; /* Since 11.0 */
 /** @} */
 

@@ -361,5 +375,7 @@
     NEM_DARWIN_IMPORT(true,  g_pfnHvVmxVCpuSetShadowAccess, hv_vmx_vcpu_set_shadow_access),
     NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuSetApicAddress,  hv_vmx_vcpu_set_apic_address),
-    NEM_DARWIN_IMPORT(true,  g_pfnHvVmxVCpuGetCapWriteVmcs, hv_vmx_vcpu_get_cap_write_vmcs)
+    NEM_DARWIN_IMPORT(true,  g_pfnHvVmxVCpuGetCapWriteVmcs, hv_vmx_vcpu_get_cap_write_vmcs),
+    NEM_DARWIN_IMPORT(true,  g_pfnHvVCpuEnableManagedMsr,   hv_vcpu_enable_managed_msr),
+    NEM_DARWIN_IMPORT(true,  g_pfnHvVCpuSetMsrAccess,       hv_vcpu_set_msr_access)
 #undef NEM_DARWIN_IMPORT
 };

@@ -409,4 +425,6 @@
 
 # define hv_vmx_vcpu_get_cap_write_vmcs g_pfnHvVmxVCpuGetCapWriteVmcs
+# define hv_vcpu_enable_managed_msr     g_pfnHvVCpuEnableManagedMsr
+# define hv_vcpu_set_msr_access         g_pfnHvVCpuSetMsrAccess
 #endif
 

@@ -1118,3 +1136,23 @@
         PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
         READ_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
+
+        /* Last Branch Record. */
+        if (pVM->nem.s.fLbr)
+        {
+            PVMXVMCSINFOSHARED const pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
+            uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
+            uint32_t const idToIpMsrStart   = pVM->nem.s.idLbrToIpMsrFirst;
+            uint32_t const cLbrStack        = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+            Assert(cLbrStack <= 32);
+            for (uint32_t i = 0; i < cLbrStack; i++)
+            {
+                READ_MSR(idFromIpMsrStart + i, pVmcsInfoShared->au64LbrFromIpMsr[i]);
+
+                /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
+                if (idToIpMsrStart != 0)
+                    READ_MSR(idToIpMsrStart + i, pVmcsInfoShared->au64LbrToIpMsr[i]);
+            }
+
+            READ_MSR(pVM->nem.s.idLbrTosMsr, pVmcsInfoShared->u64LbrTosMsr);
+        }
     }

@@ -1702,7 +1740,25 @@
         WRITE_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
         ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
-    }
-
-    WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0 /*MSR_IA32_DEBUGCTL_LBR*/);
+
+        /* Last Branch Record. */
+        if (pVM->nem.s.fLbr)
+        {
+            PVMXVMCSINFOSHARED const pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
+            uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
+            uint32_t const idToIpMsrStart   = pVM->nem.s.idLbrToIpMsrFirst;
+            uint32_t const cLbrStack        = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+            Assert(cLbrStack <= 32);
+            for (uint32_t i = 0; i < cLbrStack; i++)
+            {
+                WRITE_MSR(idFromIpMsrStart + i, pVmcsInfoShared->au64LbrFromIpMsr[i]);
+
+                /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
+                if (idToIpMsrStart != 0)
+                    WRITE_MSR(idToIpMsrStart + i, pVmcsInfoShared->au64LbrToIpMsr[i]);
+            }
+
+            WRITE_MSR(pVM->nem.s.idLbrTosMsr, pVmcsInfoShared->u64LbrTosMsr);
+        }
+    }
 
     hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);

@@ -1910,3 +1966,109 @@
 
 
 /**
+ * Sets up the LBR MSR ranges based on the host CPU.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM structure.
+ *
+ * @sa hmR0VmxSetupLbrMsrRange
+ */
+static int nemR3DarwinSetupLbrMsrRange(PVMCC pVM)
+{
+    Assert(pVM->nem.s.fLbr);
+    uint32_t idLbrFromIpMsrFirst;
+    uint32_t idLbrFromIpMsrLast;
+    uint32_t idLbrToIpMsrFirst;
+    uint32_t idLbrToIpMsrLast;
+    uint32_t idLbrTosMsr;
+
+    /*
+     * Determine the LBR MSRs supported for this host CPU family and model.
+     *
+     * See Intel spec. 17.4.8 "LBR Stack".
+     * See Intel "Model-Specific Registers" spec.
+     */
+    uint32_t const uFamilyModel = (pVM->cpum.ro.HostFeatures.uFamily << 8)
+                                | pVM->cpum.ro.HostFeatures.uModel;
+    switch (uFamilyModel)
+    {
+        case 0x0f01: case 0x0f02:
+            idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
+            idLbrFromIpMsrLast  = MSR_P4_LASTBRANCH_3;
+            idLbrToIpMsrFirst   = 0x0;
+            idLbrToIpMsrLast    = 0x0;
+            idLbrTosMsr         = MSR_P4_LASTBRANCH_TOS;
+            break;
+
+        case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
+        case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
+        case 0x066a: case 0x066c: case 0x067d: case 0x067e:
+            idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
+            idLbrFromIpMsrLast  = MSR_LASTBRANCH_31_FROM_IP;
+            idLbrToIpMsrFirst   = MSR_LASTBRANCH_0_TO_IP;
+            idLbrToIpMsrLast    = MSR_LASTBRANCH_31_TO_IP;
+            idLbrTosMsr         = MSR_LASTBRANCH_TOS;
+            break;
+
+        case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
+        case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
+        case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
+        case 0x062e: case 0x0625: case 0x062c: case 0x062f:
+            idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
+            idLbrFromIpMsrLast  = MSR_LASTBRANCH_15_FROM_IP;
+            idLbrToIpMsrFirst   = MSR_LASTBRANCH_0_TO_IP;
+            idLbrToIpMsrLast    = MSR_LASTBRANCH_15_TO_IP;
+            idLbrTosMsr         = MSR_LASTBRANCH_TOS;
+            break;
+
+        case 0x0617: case 0x061d: case 0x060f:
+            idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
+            idLbrFromIpMsrLast  = MSR_CORE2_LASTBRANCH_3_FROM_IP;
+            idLbrToIpMsrFirst   = MSR_CORE2_LASTBRANCH_0_TO_IP;
+            idLbrToIpMsrLast    = MSR_CORE2_LASTBRANCH_3_TO_IP;
+            idLbrTosMsr         = MSR_CORE2_LASTBRANCH_TOS;
+            break;
+
+        /* Atom and related microarchitectures we don't care about:
+        case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
+        case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
+        case 0x0636: */
+        /* All other CPUs: */
+        default:
+        {
+            LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
+            VMCC_GET_CPU_0(pVM)->nem.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
+            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+        }
+    }
+
+    /*
+     * Validate.
+     */
+    uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
+    PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
+    AssertCompile(   RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr)
+                  == RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrToIpMsr));
+    if (cLbrStack > RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr))
+    {
+        LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
+        VMCC_GET_CPU_0(pVM)->nem.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
+        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+    }
+    NOREF(pVCpu0);
+
+    /*
+     * Update the LBR info. to the VM struct. for use later.
+     */
+    pVM->nem.s.idLbrTosMsr = idLbrTosMsr;
+
+    pVM->nem.s.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
+    pVM->nem.s.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
+
+    pVM->nem.s.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
+    pVM->nem.s.idLbrToIpMsrLast    = idLbrToIpMsrLast;
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Sets up pin-based VM-execution controls in the VMCS.

@@ -2015,16 +2177,14 @@
         fVal |= VMX_PROC_CTLS2_RDTSCP;
 
-#if 0
     /* Enable Pause-Loop exiting. */
     if (   (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
-        && pVM->hm.s.vmx.cPleGapTicks
-        && pVM->hm.s.vmx.cPleWindowTicks)
+        && pVM->nem.s.cPleGapTicks
+        && pVM->nem.s.cPleWindowTicks)
     {
         fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
 
-        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);       AssertRC(rc);
-        rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks); AssertRC(rc);
-    }
-#endif
+        int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, pVM->nem.s.cPleGapTicks);       AssertRC(rc);
+        rc     = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, pVM->nem.s.cPleWindowTicks); AssertRC(rc);
+    }
 
     if ((fVal & fZap) != fVal)

@@ -2057,4 +2217,28 @@
     if (hrc == HV_SUCCESS)
         return VINF_SUCCESS;
+
+    return nemR3DarwinHvSts2Rc(hrc);
+}
+
+
+/**
+ * Sets the MSR to managed for the given vCPU, allowing the guest to access it.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   idMsr       The MSR to enable managed access for.
+ * @param   fMsrPerm    The MSR permissions flags.
+ */
+static int nemR3DarwinMsrSetManaged(PVMCPUCC pVCpu, uint32_t idMsr, hv_msr_flags_t fMsrPerm)
+{
+    Assert(hv_vcpu_enable_managed_msr);
+
+    hv_return_t hrc = hv_vcpu_enable_managed_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/);
+    if (hrc == HV_SUCCESS)
+    {
+        hrc = hv_vcpu_set_msr_access(pVCpu->nem.s.hVCpuId, idMsr, fMsrPerm);
+        if (hrc == HV_SUCCESS)
+            return VINF_SUCCESS;
+    }
 
     return nemR3DarwinHvSts2Rc(hrc);

@@ -2124,4 +2308,25 @@
     rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_TSC_AUX); AssertRCReturn(rc, rc);
 
+    /* Last Branch Record. */
+    if (pVM->nem.s.fLbr)
+    {
+        uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
+        uint32_t const idToIpMsrStart   = pVM->nem.s.idLbrToIpMsrFirst;
+        uint32_t const cLbrStack        = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+        Assert(cLbrStack <= 32);
+        for (uint32_t i = 0; i < cLbrStack; i++)
+        {
+            rc = nemR3DarwinMsrSetManaged(pVCpu, idFromIpMsrStart + i, HV_MSR_READ); AssertRCReturn(rc, rc);
+
+            /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
+            if (idToIpMsrStart != 0)
+            {
+                rc = nemR3DarwinMsrSetManaged(pVCpu, idToIpMsrStart + i, HV_MSR_READ); AssertRCReturn(rc, rc);
+            }
+        }
+
+        rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLbrTosMsr, HV_MSR_READ); AssertRCReturn(rc, rc);
+    }
+
     return VINF_SUCCESS;
 }

@@ -2214,11 +2419,9 @@
     pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
 
-#if 0 /** @todo */
-    if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
-    {
-        rc = VMXWriteVmcsNw(VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
+    if (pVCpu->CTX_SUFF(pVM)->nem.s.fLbr)
+    {
+        rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
         AssertRC(rc);
     }
-#endif
     return VINF_SUCCESS;
 }

@@ -2363,3 +2566,56 @@
 
+
 /**
+ * Displays the HM Last-Branch-Record info. for the guest.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pHlp        The info helper functions.
+ * @param   pszArgs     Arguments, ignored.
+ */
+static DECLCALLBACK(void) nemR3DarwinInfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
+{
+    NOREF(pszArgs);
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    if (!pVCpu)
+        pVCpu = pVM->apCpusR3[0];
+
+    Assert(pVM->nem.s.fLbr);
+
+    PCVMXVMCSINFOSHARED pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
+    uint32_t const      cLbrStack       = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
+
+    /** @todo r=ramshankar: The index technically varies depending on the CPU, but
+     *        0xf should cover everything we support thus far. Fix if necessary
+     *        later. */
+    uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
+    if (idxTopOfStack > cLbrStack)
+    {
+        pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
+                        idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
+        return;
+    }
+
+    /*
+     * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
+     */
+    pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
+    uint32_t idxCurrent = idxTopOfStack;
+    Assert(idxTopOfStack < cLbrStack);
+    Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
+    Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
+    for (;;)
+    {
+        if (pVM->nem.s.idLbrToIpMsrFirst)
+            pHlp->pfnPrintf(pHlp, "  Branch (%2u):   From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
+                            pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
+        else
+            pHlp->pfnPrintf(pHlp, "  Branch (%2u):   LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
+
+        idxCurrent = (idxCurrent - 1) % cLbrStack;
+        if (idxCurrent == idxTopOfStack)
+            break;
+    }
+}
+
+/**
  * Try initialize the native API.

@@ -2382,4 +2638,30 @@
      * Some state init.
      */
+    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
+
+    /** @cfgm{/NEM/VmxPleGap, uint32_t, 0}
+     * The pause-filter exiting gap in TSC ticks. When the number of ticks between
+     * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
+     * latest PAUSE instruction to be the start of a new PAUSE loop.
+     */
+    int rc = CFGMR3QueryU32Def(pCfgNem, "VmxPleGap", &pVM->nem.s.cPleGapTicks, 0);
+    AssertRCReturn(rc, rc);
+
+    /** @cfgm{/NEM/VmxPleWindow, uint32_t, 0}
+     * The pause-filter exiting window in TSC ticks. When the number of ticks
+     * between the current PAUSE instruction and the first PAUSE of a loop exceeds
+     * VmxPleWindow, a VM-exit is triggered.
+     *
+     * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
+     */
+    rc = CFGMR3QueryU32Def(pCfgNem, "VmxPleWindow", &pVM->nem.s.cPleWindowTicks, 0);
+    AssertRCReturn(rc, rc);
+
+    /** @cfgm{/NEM/VmxLbr, bool, false}
+     * Whether to enable LBR for the guest. This is disabled by default as it's only
+     * useful while debugging and enabling it causes a noticeable performance hit. */
+    rc = CFGMR3QueryBoolDef(pCfgNem, "VmxLbr", &pVM->nem.s.fLbr, false);
+    AssertRCReturn(rc, rc);
+
 
     /*

@@ -2389,6 +2670,13 @@
     RTERRINFOSTATIC ErrInfo;
     PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
-    int rc = nemR3DarwinLoadHv(fForced, pErrInfo);
+    rc = nemR3DarwinLoadHv(fForced, pErrInfo);
     if (RT_SUCCESS(rc))
     {
+        if (   !hv_vcpu_enable_managed_msr
+            && pVM->nem.s.fLbr)
+        {
+            LogRel(("NEM: LBR recording is disabled because the Hypervisor API misses hv_vcpu_enable_managed_msr/hv_vcpu_set_msr_access functionality\n"));
+            pVM->nem.s.fLbr = false;
+        }
+
         if (hv_vcpu_run_until)

@@ -2460,4 +2748,10 @@
         return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
 
+    if (pVM->nem.s.fLbr)
+    {
+        rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the NEM LBR info.", nemR3DarwinInfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
+        AssertRCReturn(rc, rc);
+    }
+
     if (RTErrInfoIsSet(pErrInfo))
         LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));

@@ -2612,4 +2906,10 @@
     AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
 
+    if (pVM->nem.s.fLbr)
+    {
+        int rc = nemR3DarwinSetupLbrMsrRange(pVM);
+        AssertRCReturn(rc, rc);
+    }
+
     /*
      * Setup the EMTs.
trunk/src/VBox/VMM/include/NEMInternal.h
(r93680 → r93722)

@@ -266,8 +266,28 @@
     /** Set if hv_vm_space_create() was called successfully. */
     bool                        fCreatedAsid : 1;
+    /** Set if Last Branch Record (LBR) is enabled. */
+    bool                        fLbr;
     /** The ASID for this VM (only valid if fCreatedAsid is true). */
     hv_vm_space_t               uVmAsid;
     /** Number of mach time units per NS, for hv_vcpu_run_until(). */
     uint64_t                    cMachTimePerNs;
+    /** Pause-loop exiting (PLE) gap in ticks. */
+    uint32_t                    cPleGapTicks;
+    /** Pause-loop exiting (PLE) window in ticks. */
+    uint32_t                    cPleWindowTicks;
+
+    /** The host LBR TOS (top-of-stack) MSR id. */
+    uint32_t                    idLbrTosMsr;
+
+    /** The first valid host LBR branch-from-IP stack range. */
+    uint32_t                    idLbrFromIpMsrFirst;
+    /** The last valid host LBR branch-from-IP stack range. */
+    uint32_t                    idLbrFromIpMsrLast;
+
+    /** The first valid host LBR branch-to-IP stack range. */
+    uint32_t                    idLbrToIpMsrFirst;
+    /** The last valid host LBR branch-to-IP stack range. */
+    uint32_t                    idLbrToIpMsrLast;
+
     STAMCOUNTER                 StatMapPage;
     STAMCOUNTER                 StatUnmapPage;
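Storing only a first/last MSR id per range suffices because each LBR stack is an architecturally contiguous MSR range. A small standalone sketch with illustrative numbers (taken from the Intel SDM for Skylake-class cores as an assumption, not from this changeset):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Illustrative: MSR_LASTBRANCH_0_FROM_IP = 0x680 and
         * MSR_LASTBRANCH_31_FROM_IP = 0x69f on Skylake-era cores. */
        uint32_t const idLbrFromIpMsrFirst = 0x680;
        uint32_t const idLbrFromIpMsrLast  = 0x69f;
        uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
        printf("LBR stack entries: %u\n", cLbrStack); /* prints 32 */
        return 0;
    }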