Changeset 102665 in vbox
- Timestamp:
- Dec 21, 2023 8:06:11 AM (15 months ago)
- svn:sync-xref-src-repo-rev:
- 160829
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r102523 r102665 182 182 typedef CPUMDUMPTYPE *PCPUMDUMPTYPE; 183 183 184 /** 185 * Map of variable-range MTRRs. 186 */ 187 typedef struct CPUMMTRRMAP 188 { 189 /** The index of the next available MTRR. */ 190 uint8_t idxMtrr; 191 /** The number of usable MTRRs. */ 192 uint8_t cMtrrs; 193 /** Alignment padding. */ 194 uint16_t uAlign; 195 /** The number of bytes to map via these MTRRs. */ 196 uint64_t cbToMap; 197 /** The number of bytes mapped via these MTRRs. */ 198 uint64_t cbMapped; 199 /** The variable-range MTRRs. */ 200 X86MTRRVAR aMtrrs[CPUMCTX_MAX_MTRRVAR_COUNT]; 201 } CPUMMTRRMAP; 202 /** Pointer to a CPUM variable-range MTRR structure. */ 203 typedef CPUMMTRRMAP *PCPUMMTRRMAP; 204 /** Pointer to a const CPUM variable-range MTRR structure. */ 205 typedef CPUMMTRRMAP const *PCCPUMMTRRMAP; 184 206 185 207 /********************************************************************************************************************************* … … 3217 3239 3218 3240 /** 3241 * Gets the variable-range MTRR physical address mask given an address range. 3242 * 3243 * @returns The MTRR physical address mask. 3244 * @param pVM The cross context VM structure. 3245 * @param GCPhysFirst The first guest-physical address of the memory range 3246 * (inclusive). 3247 * @param GCPhysLast The last guest-physical address of the memory range 3248 * (inclusive). 
3249 */ 3250 static uint64_t cpumR3GetVarMtrrMask(PVM pVM, RTGCPHYS GCPhysFirst, RTGCPHYS GCPhysLast) 3251 { 3252 RTGCPHYS const GCPhysLength = GCPhysLast - GCPhysFirst; 3253 uint64_t const fInvPhysMask = ~(RT_BIT_64(pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U); 3254 RTGCPHYS const GCPhysMask = (~(GCPhysLength - 1) & ~fInvPhysMask) & X86_PAGE_BASE_MASK; 3255 #ifdef VBOX_STRICT 3256 AssertMsg(GCPhysLast == ((GCPhysFirst | ~GCPhysMask) & ~fInvPhysMask), 3257 ("last=%RGp first=%RGp mask=%RGp inv_mask=%RGp\n", GCPhysLast, GCPhysFirst, GCPhysMask, fInvPhysMask)); 3258 AssertMsg(((GCPhysLast & GCPhysMask) == (GCPhysFirst & GCPhysMask)), 3259 ("last=%RGp first=%RGp mask=%RGp inv_mask=%RGp\n", GCPhysLast, GCPhysFirst, GCPhysMask, fInvPhysMask)); 3260 AssertMsg(((GCPhysLast + 1) & GCPhysMask) != (GCPhysFirst & GCPhysMask), 3261 ("last=%RGp first=%RGp mask=%RGp inv_mask=%RGp\n", GCPhysLast, GCPhysFirst, GCPhysMask, fInvPhysMask)); 3262 3263 uint64_t const cbRange = GCPhysLast - GCPhysFirst + 1; 3264 AssertMsg(cbRange >= _4K, ("last=%RGp first=%RGp mask=%RGp inv_mask=%RGp cb=%RU64\n", 3265 GCPhysLast, GCPhysFirst, GCPhysMask, fInvPhysMask, cbRange)); 3266 AssertMsg(RT_IS_POWER_OF_TWO(cbRange), ("last=%RGp first=%RGp mask=%RGp inv_mask=%RGp cb=%RU64\n", 3267 GCPhysLast, GCPhysFirst, GCPhysMask, fInvPhysMask, cbRange)); 3268 AssertMsg(GCPhysFirst == 0 || cbRange <= GCPhysFirst, ("last=%RGp first=%RGp mask=%RGp inv_mask=%RGp cb=%RU64\n", 3269 GCPhysLast, GCPhysFirst, GCPhysMask, fInvPhysMask, cbRange)); 3270 #endif 3271 return GCPhysMask; 3272 } 3273 3274 3275 /** 3276 * Gets the first and last guest-physical address for the given variable-range 3277 * MTRR. 3278 * 3279 * @param pVM The cross context VM structure. 3280 * @param pMtrrVar The variable-range MTRR. 3281 * @param pGCPhysFirst Where to store the first guest-physical address of the 3282 * memory range (inclusive). 
3283 * @param pGCPhysLast Where to store the last guest-physical address of the 3284 * memory range (inclusive). 3285 */ 3286 static void cpumR3GetVarMtrrAddrs(PVM pVM, PCX86MTRRVAR pMtrrVar, PRTGCPHYS pGCPhysFirst, PRTGCPHYS pGCPhysLast) 3287 { 3288 Assert(pMtrrVar); 3289 Assert(pGCPhysFirst); 3290 Assert(pGCPhysLast); 3291 uint64_t const fInvPhysMask = ~(RT_BIT_64(pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U); 3292 RTGCPHYS const GCPhysMask = pMtrrVar->MtrrPhysMask & X86_PAGE_BASE_MASK; 3293 RTGCPHYS const GCPhysFirst = pMtrrVar->MtrrPhysBase & X86_PAGE_BASE_MASK; 3294 RTGCPHYS const GCPhysLast = (GCPhysFirst | ~GCPhysMask) & ~fInvPhysMask; 3295 Assert((GCPhysLast & GCPhysMask) == (GCPhysFirst & GCPhysMask)); 3296 Assert(((GCPhysLast + 1) & GCPhysMask) != (GCPhysFirst & GCPhysMask)); 3297 *pGCPhysFirst = GCPhysFirst; 3298 *pGCPhysLast = GCPhysLast; 3299 } 3300 3301 3302 /** 3303 * Gets the previous power of two for a given value. 3304 * 3305 * @returns Previous power of two. 3306 * @param uVal The value (must not be zero). 3307 */ 3308 static uint64_t cpumR3GetPrevPowerOfTwo(uint64_t uVal) 3309 { 3310 Assert(uVal > 1); 3311 uint8_t const cBits = sizeof(uVal) << 3; 3312 return RT_BIT_64(cBits - 1 - ASMCountLeadingZerosU64(uVal)); 3313 } 3314 3315 3316 /** 3317 * Gets the next power of two for a given value. 3318 * 3319 * @returns Next power of two. 3320 * @param uVal The value (must not be zero). 3321 */ 3322 static uint64_t cpumR3GetNextPowerOfTwo(uint64_t uVal) 3323 { 3324 Assert(uVal > 1); 3325 uint8_t const cBits = sizeof(uVal) << 3; 3326 return RT_BIT_64(cBits - ASMCountLeadingZerosU64(uVal)); 3327 } 3328 3329 3330 /** 3331 * Gets the MTRR memory type description. 3332 * 3333 * @returns The MTRR memory type description. 3334 * @param fType The MTRR memory type. 
3335 */ 3336 static const char *cpumR3GetVarMtrrMemType(uint8_t fType) 3337 { 3338 switch (fType) 3339 { 3340 case X86_MTRR_MT_UC: return "UC"; 3341 case X86_MTRR_MT_WC: return "WC"; 3342 case X86_MTRR_MT_WT: return "WT"; 3343 case X86_MTRR_MT_WP: return "WP"; 3344 case X86_MTRR_MT_WB: return "WB"; 3345 default: return "--"; 3346 } 3347 } 3348 3349 3350 /** 3351 * Adds a memory region to the given MTRR map. 3352 * 3353 * @returns VBox status code. 3354 * @retval VINF_SUCCESS when the map could accommodate a memory region being 3355 * added. 3356 * @retval VERR_OUT_OF_RESOURCES when the map ran out of room while adding the 3357 * memory region. 3358 * 3359 * @param pVM The cross context VM structure. 3360 * @param pMtrrMap The variable-range MTRR map to add to. 3361 * @param GCPhysFirst The first guest-physical address in the memory region. 3362 * @param GCPhysLast The last guest-physical address in the memory region. 3363 * @param fType The MTRR memory type of the memory region being added. 3364 */ 3365 static int cpumR3MtrrMapAddRegion(PVM pVM, PCPUMMTRRMAP pMtrrMap, RTGCPHYS GCPhysFirst, RTGCPHYS GCPhysLast, uint8_t fType) 3366 { 3367 Assert(fType < 7 && fType != 2 && fType != 3); 3368 if (pMtrrMap->idxMtrr < pMtrrMap->cMtrrs) 3369 { 3370 pMtrrMap->aMtrrs[pMtrrMap->idxMtrr].MtrrPhysBase = GCPhysFirst | fType; 3371 pMtrrMap->aMtrrs[pMtrrMap->idxMtrr].MtrrPhysMask = cpumR3GetVarMtrrMask(pVM, GCPhysFirst, GCPhysLast) 3372 | MSR_IA32_MTRR_PHYSMASK_VALID; 3373 ++pMtrrMap->idxMtrr; 3374 3375 uint64_t const cbRange = GCPhysLast - GCPhysFirst + 1; 3376 if (fType != X86_MTRR_MT_UC) 3377 pMtrrMap->cbMapped += cbRange; 3378 else 3379 { 3380 Assert(pMtrrMap->cbMapped >= cbRange); 3381 pMtrrMap->cbMapped -= cbRange; 3382 } 3383 return VINF_SUCCESS; 3384 } 3385 return VERR_OUT_OF_RESOURCES; 3386 } 3387 3388 3389 /** 3390 * Adds an MTRR to the given MTRR map. 3391 * 3392 * @returns VBox status code. 
3393 * @retval VINF_SUCCESS when the map could accommodate the MTRR being added. 3394 * @retval VERR_OUT_OF_RESOURCES when the map ran out of room while adding the 3395 * MTRR. 3396 * 3397 * @param pVM The cross context VM structure. 3398 * @param pMtrrMap The variable-range MTRR map to add to. 3399 * @param pVarMtrr The variable-range MTRR to add from. 3400 */ 3401 static int cpumR3MtrrMapAddMtrr(PVM pVM, PCPUMMTRRMAP pMtrrMap, PCX86MTRRVAR pVarMtrr) 3402 { 3403 RTGCPHYS GCPhysFirst; 3404 RTGCPHYS GCPhysLast; 3405 cpumR3GetVarMtrrAddrs(pVM, pVarMtrr, &GCPhysFirst, &GCPhysLast); 3406 uint8_t const fType = pVarMtrr->MtrrPhysBase & MSR_IA32_MTRR_PHYSBASE_MT_MASK; 3407 return cpumR3MtrrMapAddRegion(pVM, pMtrrMap, GCPhysFirst, GCPhysLast, fType); 3408 } 3409 3410 3411 /** 3412 * Adds a source MTRR map to the given destination MTRR map. 3413 * 3414 * @returns VBox status code. 3415 * @retval VINF_SUCCESS when the map could fully accommodate the map being added. 3416 * @retval VERR_OUT_OF_RESOURCES when the map ran out of room while adding the 3417 * specified map. 3418 * 3419 * @param pVM The cross context VM structure. 3420 * @param pMtrrMapDst The variable-range MTRR map to add to (destination). 3421 * @param pMtrrMapSrc The variable-range MTRR map to add from (source). 3422 */ 3423 static int cpumR3MtrrMapAddMap(PVM pVM, PCPUMMTRRMAP pMtrrMapDst, PCCPUMMTRRMAP pMtrrMapSrc) 3424 { 3425 Assert(pMtrrMapDst); 3426 Assert(pMtrrMapSrc); 3427 for (uint8_t i = 0 ; i < pMtrrMapSrc->idxMtrr; i++) 3428 { 3429 int const rc = cpumR3MtrrMapAddMtrr(pVM, pMtrrMapDst, &pMtrrMapSrc->aMtrrs[i]); 3430 if (RT_FAILURE(rc)) 3431 return rc; 3432 } 3433 return VINF_SUCCESS; 3434 } 3435 3436 3437 /** 3438 * Maps memory using an additive method using variable-range MTRRs. 3439 * 3440 * The additive method fits as many valid MTRR WB (write-back) sub-regions to map 3441 * the specified memory size. 
For instance, 3584 MB is mapped as 2048 MB, 1024 MB 3442 * and 512 MB of WB memory, requiring 3 MTRRs. 3443 * 3444 * @returns VBox status code. 3445 * @retval VINF_SUCCESS when the requested memory could be fully mapped within the 3446 * given number of MTRRs. 3447 * @retval VERR_OUT_OF_RESOURCES when the requested memory could not be fully 3448 * mapped within the given number of MTRRs. 3449 * 3450 * @param pVM The cross context VM structure. 3451 * @param GCPhysRegionFirst The guest-physical address in the region being 3452 * mapped. 3453 * @param cb The number of bytes being mapped. 3454 * @param pMtrrMap The variable-range MTRR map to populate. 3455 */ 3456 static int cpumR3MapMtrrsAdditive(PVM pVM, RTGCPHYS GCPhysRegionFirst, uint64_t cb, PCPUMMTRRMAP pMtrrMap) 3457 { 3458 Assert(pMtrrMap); 3459 Assert(pMtrrMap->cMtrrs > 1); 3460 Assert(cb >= _4K); 3461 Assert(!(GCPhysRegionFirst & X86_PAGE_4K_OFFSET_MASK)); 3462 3463 uint64_t cbLeft = cb; 3464 uint64_t offRegion = GCPhysRegionFirst; 3465 int rc = VINF_SUCCESS; 3466 while (cbLeft > 0) 3467 { 3468 uint64_t const cbRegion = !RT_IS_POWER_OF_TWO(cbLeft) ? cpumR3GetPrevPowerOfTwo(cbLeft) : cbLeft; 3469 3470 Log3(("CPUM: MTRR: Add[%u]: %' Rhcb (%RU64 bytes)\n", pMtrrMap->idxMtrr, cbRegion, cbRegion)); 3471 rc = cpumR3MtrrMapAddRegion(pVM, pMtrrMap, offRegion, offRegion + cbRegion - 1, X86_MTRR_MT_WB); 3472 if (RT_FAILURE(rc)) 3473 return rc; 3474 3475 cbLeft -= RT_MIN(cbRegion, cbLeft); 3476 offRegion += cbRegion; 3477 } 3478 return VINF_SUCCESS; 3479 } 3480 3481 3482 /** 3483 * Maps memory using a subtractive method using variable-range MTRRs. 3484 * 3485 * The subtractive method rounds up the memory region using WB (write-back) memory 3486 * type and then "subtracts" sub-regions using UC (uncacheable) memory type. For 3487 * instance, 3584 MB is mapped as 4096 MB of WB minus 512 MB of UC, requiring 2 3488 * MTRRs. 3489 * 3490 * @returns VBox status code. 
3491 * @retval VINF_SUCCESS when the requested memory could be fully mapped within the 3492 * given number of MTRRs. 3493 * @retval VERR_OUT_OF_RESOURCES when the requested memory could not be fully 3494 * mapped within the given number of MTRRs. 3495 * 3496 * @param pVM The cross context VM structure. 3497 * @param GCPhysRegionFirst The guest-physical address in the region being 3498 * mapped. 3499 * @param cb The number of bytes being mapped. 3500 * @param pMtrrMap The variable-range MTRR map to populate. 3501 */ 3502 static int cpumR3MapMtrrsSubtractive(PVM pVM, RTGCPHYS GCPhysRegionFirst, uint64_t cb, PCPUMMTRRMAP pMtrrMap) 3503 { 3504 Assert(pMtrrMap); 3505 Assert(pMtrrMap->cMtrrs > 1); 3506 Assert(cb >= _4K); 3507 Assert(!(GCPhysRegionFirst & X86_PAGE_4K_OFFSET_MASK)); 3508 3509 uint64_t const cbRegion = !RT_IS_POWER_OF_TWO(cb) ? cpumR3GetNextPowerOfTwo(cb) : cb; 3510 Assert(cbRegion >= cb); 3511 3512 Log3(("CPUM: MTRR: Sub[%u]: %' Rhcb (%RU64 bytes) [WB]\n", pMtrrMap->idxMtrr, cbRegion, cbRegion)); 3513 int rc = cpumR3MtrrMapAddRegion(pVM, pMtrrMap, GCPhysRegionFirst, GCPhysRegionFirst + cbRegion - 1, X86_MTRR_MT_WB); 3514 if (RT_FAILURE(rc)) 3515 return rc; 3516 3517 uint64_t cbLeft = cbRegion - cb; 3518 RTGCPHYS offRegion = GCPhysRegionFirst + cbRegion; 3519 while (cbLeft > 0) 3520 { 3521 uint64_t const cbSubRegion = cpumR3GetPrevPowerOfTwo(cbLeft); 3522 3523 Log3(("CPUM: MTRR: Sub[%u]: %' Rhcb (%RU64 bytes) [UC]\n", pMtrrMap->idxMtrr, cbSubRegion, cbSubRegion)); 3524 rc = cpumR3MtrrMapAddRegion(pVM, pMtrrMap, offRegion - cbSubRegion, offRegion - 1, X86_MTRR_MT_UC); 3525 if (RT_FAILURE(rc)) 3526 return rc; 3527 3528 cbLeft -= RT_MIN(cbSubRegion, cbLeft); 3529 offRegion -= cbSubRegion; 3530 } 3531 return rc; 3532 } 3533 3534 3535 /** 3536 * Optimally maps RAM when it's not necessarily aligned to a power of two using 3537 * variable-range MTRRs. 3538 * 3539 * @returns VBox status code. 
3540 * @retval VINF_SUCCESS when the requested memory could be fully mapped within the 3541 * given number of MTRRs. 3542 * @retval VERR_OUT_OF_RESOURCES when the requested memory could not be fully 3543 * mapped within the given number of MTRRs. 3544 * 3545 * @param pVM The cross context VM structure. 3546 * @param GCPhysRegionFirst The guest-physical address in the region being 3547 * mapped. 3548 * @param cb The number of bytes being mapped. 3549 * @param pMtrrMap The variable-range MTRR map to populate. 3550 */ 3551 static int cpumR3MapMtrrsOptimal(PVM pVM, RTGCPHYS GCPhysRegionFirst, uint64_t cb, PCPUMMTRRMAP pMtrrMap) 3552 { 3553 Assert(pMtrrMap); 3554 Assert(pMtrrMap->cMtrrs > 1); 3555 Assert(cb >= _4K); 3556 Assert(!(GCPhysRegionFirst & X86_PAGE_4K_OFFSET_MASK)); 3557 3558 /* 3559 * Additive method. 3560 */ 3561 CPUMMTRRMAP MtrrMapAdd; 3562 RT_ZERO(MtrrMapAdd); 3563 MtrrMapAdd.cMtrrs = pMtrrMap->cMtrrs; 3564 MtrrMapAdd.cbToMap = cb; 3565 int rcAdd; 3566 { 3567 rcAdd = cpumR3MapMtrrsAdditive(pVM, GCPhysRegionFirst, cb, &MtrrMapAdd); 3568 if (RT_SUCCESS(rcAdd)) 3569 { 3570 Assert(MtrrMapAdd.idxMtrr > 0); 3571 Assert(MtrrMapAdd.idxMtrr <= MtrrMapAdd.cMtrrs); 3572 Assert(MtrrMapAdd.cbMapped == cb); 3573 Log3(("CPUM: MTRR: Mapped %u regions using additive method\n", MtrrMapAdd.idxMtrr)); 3574 3575 /* 3576 * If we were able to map memory using 2 or fewer MTRRs, don't bother with trying 3577 * to map using the subtractive method as that requires at least 2 MTRRs anyway. 3578 */ 3579 if (MtrrMapAdd.idxMtrr <= 2) 3580 return cpumR3MtrrMapAddMap(pVM, pMtrrMap, &MtrrMapAdd); 3581 } 3582 else 3583 Log3(("CPUM: MTRR: Partially mapped %u regions using additive method\n", MtrrMapAdd.idxMtrr)); 3584 } 3585 3586 /* 3587 * Subtractive method. 
3588 */ 3589 CPUMMTRRMAP MtrrMapSub; 3590 RT_ZERO(MtrrMapSub); 3591 MtrrMapSub.cMtrrs = pMtrrMap->cMtrrs; 3592 MtrrMapSub.cbToMap = cb; 3593 int rcSub; 3594 { 3595 rcSub = cpumR3MapMtrrsSubtractive(pVM, GCPhysRegionFirst, cb, &MtrrMapSub); 3596 if (RT_SUCCESS(rcSub)) 3597 { 3598 Assert(MtrrMapSub.idxMtrr > 0); 3599 Assert(MtrrMapSub.idxMtrr <= MtrrMapSub.cMtrrs); 3600 Assert(MtrrMapSub.cbMapped == cb); 3601 Log3(("CPUM: MTRR: Mapped %u regions using subtractive method\n", MtrrMapSub.idxMtrr)); 3602 } 3603 else 3604 Log3(("CPUM: MTRR: Partially mapped %u regions using subtractive method\n", MtrrMapAdd.idxMtrr)); 3605 } 3606 3607 /* 3608 * Pick whichever method requires fewer MTRRs to map the memory. 3609 */ 3610 PCCPUMMTRRMAP pMtrrMapOptimal; 3611 if ( RT_SUCCESS(rcAdd) 3612 && RT_SUCCESS(rcSub)) 3613 { 3614 Assert(MtrrMapAdd.cbMapped == MtrrMapSub.cbMapped); 3615 if (MtrrMapSub.idxMtrr < MtrrMapAdd.idxMtrr) 3616 pMtrrMapOptimal = &MtrrMapSub; 3617 else 3618 pMtrrMapOptimal = &MtrrMapAdd; 3619 } 3620 else if (RT_SUCCESS(rcAdd)) 3621 pMtrrMapOptimal = &MtrrMapAdd; 3622 else if (RT_SUCCESS(rcSub)) 3623 pMtrrMapOptimal = &MtrrMapSub; 3624 else 3625 { 3626 /* 3627 * If both methods fail, use the additive method as it gives partially mapped 3628 * memory as opposed to memory that isn't present. 3629 */ 3630 pMtrrMapOptimal = &MtrrMapAdd; 3631 } 3632 3633 int const rc = cpumR3MtrrMapAddMap(pVM, pMtrrMap, pMtrrMapOptimal); 3634 if ( RT_SUCCESS(rc) 3635 && pMtrrMapOptimal->cbMapped == pMtrrMapOptimal->cbToMap) 3636 return rc; 3637 return VERR_OUT_OF_RESOURCES; 3638 } 3639 3640 3641 /** 3642 * Maps RAM above 4GB using variable-range MTRRs. 3643 * 3644 * @returns VBox status code. 3645 * @retval VINF_SUCCESS when the requested memory could be fully mapped within the 3646 * given number of MTRRs. 3647 * @retval VERR_OUT_OF_RESOURCES when the requested memory could not be fully 3648 * mapped within the given number of MTRRs. 
3649 * 3650 * @param pVM The cross context VM structure. 3651 * @param cb The number of bytes above 4GB to map. 3652 * @param pMtrrMap The variable-range MTRR map to populate. 3653 */ 3654 static int cpumR3MapMtrrsAbove4GB(PVM pVM, uint64_t cb, PCPUMMTRRMAP pMtrrMap) 3655 { 3656 Assert(pMtrrMap); 3657 Assert(pMtrrMap->cMtrrs > 1); 3658 Assert(cb >= _4K); 3659 3660 /* 3661 * Until the remainder of the memory fits within 4GB, map regions at 3662 * incremental powers of two offsets and sizes. 3663 */ 3664 uint64_t cbLeft = cb; 3665 uint64_t offRegion = _4G; 3666 while (cbLeft > offRegion) 3667 { 3668 uint64_t const cbRegion = offRegion; 3669 3670 Log3(("CPUM: MTRR: [%u]: %' Rhcb (%RU64 bytes)\n", pMtrrMap->idxMtrr, cbRegion, cbRegion)); 3671 int const rc = cpumR3MtrrMapAddRegion(pVM, pMtrrMap, offRegion, offRegion + cbRegion - 1, X86_MTRR_MT_WB); 3672 if (RT_FAILURE(rc)) 3673 return rc; 3674 3675 offRegion <<= 1; 3676 cbLeft -= RT_MIN(cbRegion, cbLeft); 3677 } 3678 3679 /* 3680 * Optimally try and map any remaining memory smaller than 4GB. 3681 */ 3682 Assert(pMtrrMap->cMtrrs - pMtrrMap->idxMtrr > 0); 3683 Assert(cbLeft < _4G); 3684 return cpumR3MapMtrrsOptimal(pVM, offRegion, cbLeft, pMtrrMap); 3685 } 3686 3687 3688 /** 3689 * Maps guest RAM via MTRRs. 3690 * 3691 * @returns VBox status code. 3692 * @param pVM The cross context VM structure. 3693 */ 3694 static int cpumR3MapMtrrs(PVM pVM) 3695 { 3696 /* 3697 * The RAM size configured for the VM does NOT include the RAM hole! 3698 * We cannot make ANY assumptions about the RAM size or the RAM hole size 3699 * of the VM since it is configurable by the user. Hence, we must check for 3700 * atypical sizes. 3701 */ 3702 uint64_t cbRam; 3703 int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam); 3704 if (RT_FAILURE(rc)) 3705 { 3706 LogRel(("CPUM: Cannot map RAM via MTRRs since the RAM size is not configured for the VM\n")); 3707 return VINF_SUCCESS; 3708 } 3709 3710 /* 3711 * Map the RAM below 1MB. 
3712 */ 3713 if (cbRam >= _1M) 3714 { 3715 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 3716 { 3717 PCPUMCTXMSRS pCtxMsrs = &pVM->apCpusR3[idCpu]->cpum.s.GuestMsrs; 3718 pCtxMsrs->msr.MtrrFix64K_00000 = 0x0606060606060606; 3719 pCtxMsrs->msr.MtrrFix16K_80000 = 0x0606060606060606; 3720 pCtxMsrs->msr.MtrrFix16K_A0000 = 0; 3721 pCtxMsrs->msr.MtrrFix4K_C0000 = 0x0505050505050505; 3722 pCtxMsrs->msr.MtrrFix4K_C8000 = 0x0505050505050505; 3723 pCtxMsrs->msr.MtrrFix4K_D0000 = 0x0505050505050505; 3724 pCtxMsrs->msr.MtrrFix4K_D8000 = 0x0505050505050505; 3725 pCtxMsrs->msr.MtrrFix4K_E0000 = 0x0505050505050505; 3726 pCtxMsrs->msr.MtrrFix4K_E8000 = 0x0505050505050505; 3727 pCtxMsrs->msr.MtrrFix4K_F0000 = 0x0505050505050505; 3728 pCtxMsrs->msr.MtrrFix4K_F8000 = 0x0505050505050505; 3729 } 3730 LogRel(("CPUM: Mapped %' Rhcb (%RU64 bytes) of RAM using fixed-range MTRRs\n", _1M, _1M)); 3731 } 3732 else 3733 { 3734 LogRel(("CPUM: WARNING! Cannot map RAM via MTRRs since the RAM size is below 1 MiB\n")); 3735 return VINF_SUCCESS; 3736 } 3737 3738 if (cbRam > _1M + _4K) 3739 { /* likely */ } 3740 else 3741 { 3742 LogRel(("CPUM: WARNING! Cannot map RAM above 1M via MTRRs since the RAM size above 1M is below 4K\n")); 3743 return VINF_SUCCESS; 3744 } 3745 3746 /* 3747 * Check if there is at least 1 MTRR available in addition to MTRRs reserved 3748 * for use by software for mapping guest memory, see @bugref{10498#c34}. 3749 * 3750 * Intel Pentium Pro Processor's BIOS Writers Guide and our EFI code reserves 3751 * 2 MTRRs for use by software and thus we reserve the same here. 3752 */ 3753 uint8_t const cMtrrsMax = pVM->apCpusR3[0]->cpum.s.GuestMsrs.msr.MtrrCap & MSR_IA32_MTRR_CAP_VCNT_MASK; 3754 uint8_t const cMtrrsRsvd = 2; 3755 if (cMtrrsMax < cMtrrsRsvd + 1) 3756 { 3757 LogRel(("CPUM: WARNING! 
Variable-range MTRRs (%u) insufficient to map RAM since %u of them are reserved for software\n", 3758 cMtrrsMax, cMtrrsRsvd)); 3759 return VINF_SUCCESS; 3760 } 3761 3762 CPUMMTRRMAP MtrrMap; 3763 RT_ZERO(MtrrMap); 3764 uint8_t const cMtrrsMappable = cMtrrsMax - cMtrrsRsvd; 3765 Assert(cMtrrsMappable > 0); /* Paranoia. */ 3766 AssertLogRelMsgReturn(cMtrrsMappable <= RT_ELEMENTS(MtrrMap.aMtrrs), 3767 ("Mappable variable-range MTRRs (%u) exceed MTRRs available (%u)\n", cMtrrsMappable, 3768 RT_ELEMENTS(MtrrMap.aMtrrs)), 3769 VERR_CPUM_IPE_1); 3770 MtrrMap.cMtrrs = cMtrrsMappable; 3771 MtrrMap.cbToMap = cbRam; 3772 3773 /* 3774 * Get the RAM hole size configured for the VM. 3775 * Since MM has already validated it, we only debug assert the same constraints here. 3776 * 3777 * Although it is not required by the MTRR mapping code that the RAM hole size be a 3778 * power of 2, it is highly recommended to keep it this way in order to drastically 3779 * reduce the number of MTRRs used. 3780 */ 3781 uint32_t const cbRamHole = MMR3PhysGet4GBRamHoleSize(pVM); 3782 AssertMsg(cbRamHole <= 4032U * _1M, ("RAM hole size (%u bytes) is too large\n", cbRamHole)); 3783 AssertMsg(cbRamHole > 16 * _1M, ("RAM hole size (%u byets) is too small\n", cbRamHole)); 3784 AssertMsg(!(cbRamHole & (_4M - 1)), ("RAM hole size (%u bytes) must be 4MB aligned\n", cbRamHole)); 3785 3786 /* 3787 * Map the RAM (and RAM hole) below 4GB. 3788 */ 3789 uint64_t const cbBelow4GB = RT_MIN(cbRam, (uint64_t)_4G - cbRamHole); 3790 rc = cpumR3MapMtrrsOptimal(pVM, 0 /* GCPhysFirst */, cbBelow4GB, &MtrrMap); 3791 if (RT_SUCCESS(rc)) 3792 { 3793 Assert(MtrrMap.idxMtrr > 0); 3794 Assert(MtrrMap.idxMtrr <= MtrrMap.cMtrrs); 3795 Assert(MtrrMap.cbMapped == cbBelow4GB); 3796 3797 /* 3798 * Map the RAM above 4GB. 3799 */ 3800 uint64_t const cbAbove4GB = cbRam + cbRamHole > _4G ? 
cbRam + cbRamHole - _4G : 0; 3801 if (cbAbove4GB) 3802 { 3803 rc = cpumR3MapMtrrsAbove4GB(pVM, cbAbove4GB, &MtrrMap); 3804 if (RT_SUCCESS(rc)) 3805 Assert(MtrrMap.cbMapped == MtrrMap.cbToMap); 3806 } 3807 LogRel(("CPUM: Mapped %' Rhcb (%RU64 bytes) of RAM using %u variable-range MTRRs\n", MtrrMap.cbMapped, MtrrMap.cbMapped, 3808 MtrrMap.idxMtrr)); 3809 } 3810 3811 /* 3812 * Check if we ran out of MTRRs while mapping the memory. 3813 */ 3814 if (MtrrMap.cbMapped < cbRam) 3815 { 3816 Assert(rc == VERR_OUT_OF_RESOURCES); 3817 Assert(MtrrMap.idxMtrr == cMtrrsMappable); 3818 Assert(MtrrMap.idxMtrr == MtrrMap.cMtrrs); 3819 uint64_t const cbLost = cbRam - MtrrMap.cbMapped; 3820 LogRel(("CPUM: WARNING! Could not map %' Rhcb (%RU64 bytes) of RAM using %u variable-range MTRRs\n", cbLost, cbLost, 3821 MtrrMap.cMtrrs)); 3822 rc = VINF_SUCCESS; 3823 } 3824 3825 /* 3826 * Copy mapped MTRRs to all VCPUs. 3827 */ 3828 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 3829 { 3830 PCPUMCTXMSRS pCtxMsrs = &pVM->apCpusR3[idCpu]->cpum.s.GuestMsrs; 3831 Assert(sizeof(pCtxMsrs->msr.aMtrrVarMsrs) == sizeof(MtrrMap.aMtrrs)); 3832 memcpy(&pCtxMsrs->msr.aMtrrVarMsrs[0], &MtrrMap.aMtrrs[0], sizeof(MtrrMap.aMtrrs)); 3833 } 3834 3835 return rc; 3836 } 3837 3838 3839 /** 3219 3840 * Formats the EFLAGS value into mnemonics. 
3220 3841 * … … 3627 4248 if (fIsValid) 3628 4249 { 3629 uint64_t const fInvPhysMask = ~(RT_BIT_64(pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);3630 RTGCPHYS const GCPhysMask = pMtrrVar->MtrrPhysMask & X86_PAGE_BASE_MASK;3631 RTGCPHYS const GCPhysFirst = pMtrrVar->MtrrPhysBase & X86_PAGE_BASE_MASK;3632 RTGCPHYS const GCPhysLast = (GCPhysFirst | ~GCPhysMask) & ~fInvPhysMask;3633 Assert((GCPhysLast & GCPhysMask) == (GCPhysFirst & GCPhysMask));3634 Assert(((GCPhysLast + 1) & GCPhysMask) != (GCPhysFirst & GCPhysMask));4250 RTGCPHYS GCPhysFirst; 4251 RTGCPHYS GCPhysLast; 4252 cpumR3GetVarMtrrAddrs(pVM, pMtrrVar, &GCPhysFirst, &GCPhysLast); 4253 uint8_t const fType = pMtrrVar->MtrrPhysBase & MSR_IA32_MTRR_PHYSBASE_MT_MASK; 4254 const char *pszType = cpumR3GetVarMtrrMemType(fType); 4255 uint64_t const cbRange = GCPhysLast - GCPhysFirst + 1; 3635 4256 pHlp->pfnPrintf(pHlp, 3636 "%sMTRR_PHYSBASE[%2u] =%016RX64 First=%016RX64 \n"3637 "%sMTRR_PHYSMASK[%2u] =%016RX64 Last =%016RX64 \n",3638 pszPrefix, iRange, pMtrrVar->MtrrPhysBase, GCPhysFirst, 3639 pszPrefix, iRange, pMtrrVar->MtrrPhysMask, GCPhysLast );4257 "%sMTRR_PHYSBASE[%2u] =%016RX64 First=%016RX64 %6RU64 MB [%s]\n" 4258 "%sMTRR_PHYSMASK[%2u] =%016RX64 Last =%016RX64 %6RU64 MB [%RU64 MB]\n", 4259 pszPrefix, iRange, pMtrrVar->MtrrPhysBase, GCPhysFirst, GCPhysFirst / _1M, pszType, 4260 pszPrefix, iRange, pMtrrVar->MtrrPhysMask, GCPhysLast, GCPhysLast / _1M, cbRange / (uint64_t)_1M); 3640 4261 } 3641 4262 else … … 4610 5231 4611 5232 /** 4612 * Computes the variable-range MTRR physical address mask given an address range.4613 *4614 * @returns The MTRR physical address mask.4615 * @param pVM The cross context VM structure.4616 * @param GCPhysFirst The first guest-physical address of the memory range4617 * (inclusive).4618 * @param GCPhysLast The last guest-physical address of the memory range4619 * (inclusive).4620 */4621 static uint64_t cpumR3GetVarRangeMtrrMask(PVM pVM, RTGCPHYS GCPhysFirst, RTGCPHYS 
GCPhysLast)4622 {4623 RTGCPHYS const GCPhysLength = GCPhysLast - GCPhysFirst;4624 uint64_t const fInvPhysMask = ~(RT_BIT_64(pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);4625 RTGCPHYS const GCPhysMask = (~(GCPhysLength - 1) & ~fInvPhysMask) & X86_PAGE_BASE_MASK;4626 #ifdef VBOX_STRICT4627 /* Paranoia. */4628 Assert(GCPhysLast == ((GCPhysFirst | ~GCPhysMask) & ~fInvPhysMask));4629 Assert((GCPhysLast & GCPhysMask) == (GCPhysFirst & GCPhysMask));4630 Assert(((GCPhysLast + 1) & GCPhysMask) != (GCPhysFirst & GCPhysMask));4631 #endif4632 return GCPhysMask;4633 }4634 4635 4636 /**4637 5233 * Called when the ring-3 init phase completes. 4638 5234 * … … 4684 5280 4685 5281 /* 4686 * InitializeMTRRs.5282 * Map guest RAM via MTRRs. 4687 5283 */ 4688 5284 if (pVM->cpum.s.fMtrrRead) 4689 5285 { 4690 uint64_t cbRam; 4691 CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0); 4692 AssertReturn(cbRam > _1M, VERR_CPUM_IPE_1); 4693 RTGCPHYS const GCPhysFirst = 0; 4694 RTGCPHYS const GCPhysLast = cbRam - 1; 4695 uint64_t const fMtrrPhysMask = cpumR3GetVarRangeMtrrMask(pVM, GCPhysFirst, GCPhysLast); 4696 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 4697 { 4698 PCPUMCTXMSRS pCtxMsrs = &pVM->apCpusR3[idCpu]->cpum.s.GuestMsrs; 4699 pCtxMsrs->msr.MtrrFix64K_00000 = 0x0606060606060606; 4700 pCtxMsrs->msr.MtrrFix16K_80000 = 0x0606060606060606; 4701 pCtxMsrs->msr.MtrrFix16K_A0000 = 0; 4702 pCtxMsrs->msr.MtrrFix4K_C0000 = 0x0505050505050505; 4703 pCtxMsrs->msr.MtrrFix4K_C8000 = 0x0505050505050505; 4704 pCtxMsrs->msr.MtrrFix4K_D0000 = 0x0505050505050505; 4705 pCtxMsrs->msr.MtrrFix4K_D8000 = 0x0505050505050505; 4706 pCtxMsrs->msr.MtrrFix4K_E0000 = 0x0505050505050505; 4707 pCtxMsrs->msr.MtrrFix4K_E8000 = 0x0505050505050505; 4708 pCtxMsrs->msr.MtrrFix4K_F0000 = 0x0505050505050505; 4709 pCtxMsrs->msr.MtrrFix4K_F8000 = 0x0505050505050505; 4710 //pCtxMsrs->msr.aMtrrVarMsrs[0].MtrrPhysBase = GCPhysFirst | X86_MTRR_MT_WB; 4711 //pCtxMsrs->msr.aMtrrVarMsrs[0].MtrrPhysMask = 
fMtrrPhysMask | MSR_IA32_MTRR_PHYSMASK_VALID; 4712 } 4713 LogRel(("CPUM: Initialized MTRRs (fMtrrPhysMask=%RGp GCPhysLast=%RGp)\n", fMtrrPhysMask, GCPhysLast)); 5286 int const rc = cpumR3MapMtrrs(pVM); 5287 if (RT_SUCCESS(rc)) 5288 { /* likely */ } 5289 else 5290 return rc; 4714 5291 } 4715 5292 break;
Note: See TracChangeset for help on using the changeset viewer.