Changeset 102690 in vbox for trunk/src/VBox
Timestamp: Dec 22, 2023 8:53:01 AM (14 months ago)
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r102630 r102690 3166 3166 3167 3167 /** 3168 * Inserts variable-range MTRR MSR ranges based on the given count. 3169 * 3170 * Since we need to insert the MSRs beyond what the CPU profile has inserted, we 3171 * reinsert the whole range here since the variable-range MTRR MSR read+write 3172 * functions handle ranges as well as the \#GP checking. 3173 * 3174 * @returns VBox status code. 3175 * @param pVM The cross context VM structure. 3176 * @param cVarMtrrs The number of variable-range MTRRs to insert. This must be 3177 * less than or equal to CPUMCTX_MAX_MTRRVAR_COUNT. 3178 */ 3179 static int cpumR3VarMtrrMsrRangeInsert(PVM pVM, uint8_t const cVarMtrrs) 3180 { 3181 #ifdef VBOX_WITH_STATISTICS 3182 # define CPUM_MTRR_PHYSBASE_MSRRANGE(a_uMsr, a_uValue, a_szName) \ 3183 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysBaseN, kCpumMsrWrFn_Ia32MtrrPhysBaseN, 0, 0, a_uValue, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } } 3184 # define CPUM_MTRR_PHYSMASK_MSRRANGE(a_uMsr, a_uValue, a_szName) \ 3185 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysMaskN, kCpumMsrWrFn_Ia32MtrrPhysMaskN, 0, 0, a_uValue, 0, 0, a_szName, { 0 }, { 0 }, { 0 }, { 0 } } 3186 #else 3187 # define CPUM_MTRR_PHYSBASE_MSRRANGE(a_uMsr, a_uValue, a_szName) \ 3188 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysBaseN, kCpumMsrWrFn_Ia32MtrrPhysBaseN, 0, 0, a_uValue, 0, 0, a_szName } 3189 # define CPUM_MTRR_PHYSMASK_MSRRANGE(a_uMsr, a_uValue, a_szName) \ 3190 { (a_uMsr), (a_uMsr), kCpumMsrRdFn_Ia32MtrrPhysMaskN, kCpumMsrWrFn_Ia32MtrrPhysMaskN, 0, 0, a_uValue, 0, 0, a_szName } 3191 #endif 3192 static CPUMMSRRANGE const s_aMsrRanges_MtrrPhysBase[CPUMCTX_MAX_MTRRVAR_COUNT] = 3193 { 3194 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE0, 0, "MSR_IA32_MTRR_PHYSBASE0"), 3195 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE1, 1, "MSR_IA32_MTRR_PHYSBASE1"), 3196 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE2, 2, "MSR_IA32_MTRR_PHYSBASE2"), 3197 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE3, 3, 
"MSR_IA32_MTRR_PHYSBASE3"), 3198 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE4, 4, "MSR_IA32_MTRR_PHYSBASE4"), 3199 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE5, 5, "MSR_IA32_MTRR_PHYSBASE5"), 3200 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE6, 6, "MSR_IA32_MTRR_PHYSBASE6"), 3201 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE7, 7, "MSR_IA32_MTRR_PHYSBASE7"), 3202 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE8, 8, "MSR_IA32_MTRR_PHYSBASE8"), 3203 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9, 9, "MSR_IA32_MTRR_PHYSBASE9"), 3204 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 2, 10, "MSR_IA32_MTRR_PHYSBASE10"), 3205 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 4, 11, "MSR_IA32_MTRR_PHYSBASE11"), 3206 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 6, 12, "MSR_IA32_MTRR_PHYSBASE12"), 3207 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 8, 13, "MSR_IA32_MTRR_PHYSBASE13"), 3208 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 10, 14, "MSR_IA32_MTRR_PHYSBASE14"), 3209 CPUM_MTRR_PHYSBASE_MSRRANGE(MSR_IA32_MTRR_PHYSBASE9 + 12, 15, "MSR_IA32_MTRR_PHYSBASE15"), 3210 }; 3211 static CPUMMSRRANGE const s_aMsrRanges_MtrrPhysMask[CPUMCTX_MAX_MTRRVAR_COUNT] = 3212 { 3213 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK0, 0, "MSR_IA32_MTRR_PHYSMASK0"), 3214 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK1, 1, "MSR_IA32_MTRR_PHYSMASK1"), 3215 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK2, 2, "MSR_IA32_MTRR_PHYSMASK2"), 3216 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK3, 3, "MSR_IA32_MTRR_PHYSMASK3"), 3217 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK4, 4, "MSR_IA32_MTRR_PHYSMASK4"), 3218 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK5, 5, "MSR_IA32_MTRR_PHYSMASK5"), 3219 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK6, 6, "MSR_IA32_MTRR_PHYSMASK6"), 3220 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK7, 7, "MSR_IA32_MTRR_PHYSMASK7"), 3221 
CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK8, 8, "MSR_IA32_MTRR_PHYSMASK8"), 3222 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9, 9, "MSR_IA32_MTRR_PHYSMASK9"), 3223 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 2, 10, "MSR_IA32_MTRR_PHYSMASK10"), 3224 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 4, 11, "MSR_IA32_MTRR_PHYSMASK11"), 3225 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 6, 12, "MSR_IA32_MTRR_PHYSMASK12"), 3226 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 8, 13, "MSR_IA32_MTRR_PHYSMASK13"), 3227 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 10, 14, "MSR_IA32_MTRR_PHYSMASK14"), 3228 CPUM_MTRR_PHYSMASK_MSRRANGE(MSR_IA32_MTRR_PHYSMASK9 + 12, 15, "MSR_IA32_MTRR_PHYSMASK15"), 3229 }; 3230 AssertCompile(RT_ELEMENTS(s_aMsrRanges_MtrrPhysBase) == RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs)); 3231 AssertCompile(RT_ELEMENTS(s_aMsrRanges_MtrrPhysMask) == RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs)); 3232 3233 Assert(cVarMtrrs <= RT_ELEMENTS(pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.aMtrrVarMsrs)); 3234 for (unsigned i = 0; i < cVarMtrrs; i++) 3235 { 3236 int rc = CPUMR3MsrRangesInsert(pVM, &s_aMsrRanges_MtrrPhysBase[i]); 3237 AssertLogRelRCReturn(rc, rc); 3238 rc = CPUMR3MsrRangesInsert(pVM, &s_aMsrRanges_MtrrPhysMask[i]); 3239 AssertLogRelRCReturn(rc, rc); 3240 } 3241 return VINF_SUCCESS; 3242 3243 #undef CPUM_MTRR_PHYSBASE_MSRRANGE 3244 #undef CPUM_MTRR_PHYSMASK_MSRRANGE 3245 } 3246 3247 3248 /** 3168 3249 * Initialize MTRR capability based on what the guest CPU profile (typically host) 3169 3250 * supports. 3170 3251 * 3171 3252 * @returns VBox status code. 3172 * @param pVM The cross context VM structure. 3253 * @param pVM The cross context VM structure. 3254 * @param fMtrrVarCountIsVirt Whether the variable-range MTRR count is fully 3255 * virtualized (@c true) or derived from the CPU 3256 * profile (@c false). 
3173 3257 */ 3174 static int cpumR3InitMtrrCap(PVM pVM )3258 static int cpumR3InitMtrrCap(PVM pVM, bool fMtrrVarCountIsVirt) 3175 3259 { 3176 3260 #ifdef RT_ARCH_AMD64 … … 3184 3268 3185 3269 /* Construct guest MTRR support capabilities. */ 3186 uint8_t const cGuestVarRangeRegs = RT_MIN(cProfileVarRangeRegs, CPUMCTX_MAX_MTRRVAR_COUNT); 3270 uint8_t const cGuestVarRangeRegs = fMtrrVarCountIsVirt ? CPUMCTX_MAX_MTRRVAR_COUNT 3271 : RT_MIN(cProfileVarRangeRegs, CPUMCTX_MAX_MTRRVAR_COUNT); 3187 3272 uint64_t const uGstMtrrCap = cGuestVarRangeRegs 3188 3273 | MSR_IA32_MTRR_CAP_FIX … … 3197 3282 } 3198 3283 3199 LogRel(("CPUM: Enabled fixed-range MTRRs and %u variable-range MTRRs\n", cGuestVarRangeRegs)); 3284 if (fMtrrVarCountIsVirt) 3285 { 3286 /* 3287 * Insert the full variable-range MTRR MSR range ourselves so it extends beyond what is 3288 * typically reported by the hardware CPU profile. 3289 */ 3290 LogRel(("CPUM: Enabled fixed-range MTRRs and %u (virtualized) variable-range MTRRs\n", cGuestVarRangeRegs)); 3291 return cpumR3VarMtrrMsrRangeInsert(pVM, cGuestVarRangeRegs); 3292 } 3200 3293 3201 3294 /* … … 3203 3296 * are consistent with what is reported to the guest via CPUID. 3204 3297 */ 3298 LogRel(("CPUM: Enabled fixed-range MTRRs and %u (CPU profile derived) variable-range MTRRs\n", cGuestVarRangeRegs)); 3205 3299 return cpumR3FixVarMtrrPhysAddrWidths(pVM, cGuestVarRangeRegs); 3206 3300 } … … 3440 3534 * MTRR support. 3441 3535 * We've always reported the MTRR feature bit in CPUID. 3442 * Here we allow exposing MTRRs with reasonable default values just to get Nested Hyper-V 3443 * going. MTRR support isn't feature complete, see @bugref{10318} and bugref{10498}. 3536 * Here we allow exposing MTRRs with reasonable default values (especially required 3537 * by Windows 10 guests with Hyper-V enabled). The MTRR support isn't feature 3538 * complete, see @bugref{10318} and bugref{10498}. 
3444 3539 */ 3445 3540 if (pVM->cpum.s.GuestFeatures.fMtrr) 3446 3541 { 3447 /* Check if MTRR read+write support is enabled. */ 3542 /** @cfgm{/CPUM/MtrrWrite, boolean, true} 3543 * Whether to enable MTRR read+write support. When enabled, this automatically 3544 * enables MTRR read support as well. */ 3448 3545 bool fEnableMtrrWrite; 3449 rc = CFGMR3QueryBoolDef(pCpumCfg, "M TRRWrite", &fEnableMtrrWrite,3546 rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrWrite", &fEnableMtrrWrite, 3450 3547 false /** @todo true - 2023-12-12 bird: does not work yet, so disabled it */); 3451 3548 AssertRCReturn(rc, rc); … … 3458 3555 else 3459 3556 { 3460 /* Check if MTRR read-only reporting is enabled. */ 3461 rc = CFGMR3QueryBoolDef(pCpumCfg, "MTRR", &pVM->cpum.s.fMtrrRead, false); 3557 /** @cfgm{/CPUM/MtrrRead, boolean, false} 3558 * Whether to enable MTRR read support and to initialize mapping of guest memory via 3559 * MTRRs. When disabled, MTRRs are left blank, returns 0 on reads and ignores 3560 * writes. Some guests like GNU/Linux recognize a virtual system when MTRRs are left 3561 * blank but some guests may expect their RAM to be mapped via MTRRs similar to 3562 * real hardware. */ 3563 rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrRead", &pVM->cpum.s.fMtrrRead, false); 3462 3564 AssertRCReturn(rc, rc); 3463 3565 LogRel(("CPUM: Enabled MTRR read-only support\n")); … … 3468 3570 if (pVM->cpum.s.fMtrrRead) 3469 3571 { 3470 rc = cpumR3InitMtrrCap(pVM); 3572 /** @cfgm{/CPUM/MtrrVarCountIsVirtual, boolean, true} 3573 * When enabled, the number of variable-range MTRRs are virtualized. When disabled, 3574 * the number of variable-range MTRRs are derived from the CPU profile. Unless 3575 * guests have problems with the virtualized variable-range MTRR count, it is 3576 * recommended to keep this enabled so that there are sufficient MTRRs to fully 3577 * describe all regions of the guest RAM. 
*/ 3578 bool fMtrrVarCountIsVirt; 3579 rc = CFGMR3QueryBoolDef(pCpumCfg, "MtrrVarCountIsVirtual", &fMtrrVarCountIsVirt, true); 3580 AssertRCReturn(rc, rc); 3581 3582 rc = cpumR3InitMtrrCap(pVM, fMtrrVarCountIsVirt); 3471 3583 if (RT_SUCCESS(rc)) 3472 3584 { /* likely */ }
Note: See TracChangeset for help on using the changeset viewer.