Changeset 100935 in vbox for trunk/src/VBox/VMM/VMMAll

- Timestamp: Aug 22, 2023 9:30:06 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp (r100854 → r100935)

@@ -1446,4 +1446,5 @@
     pFeatures->fTsc               = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_TSC);
     pFeatures->fSysEnter          = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_SEP);
+    pFeatures->fMtrr              = RT_BOOL(pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_MTRR);
     pFeatures->fHypervisorPresent = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_HVP);
     pFeatures->fMonitorMWait      = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR);
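The one-line change above makes CPUM record whether the guest CPU advertises MTRR support in CPUID leaf 1, EDX bit 12 (the bit X86_CPUID_FEATURE_EDX_MTRR refers to). As a rough illustration of the same probe outside VirtualBox, the sketch below queries that leaf with GCC/Clang's <cpuid.h> helper; the MTRR_EDX_BIT constant and the program around it are illustrative assumptions, not part of this changeset.

    /* Hedged sketch: detect host MTRR support via CPUID leaf 1, EDX bit 12,
     * mirroring what the added pFeatures->fMtrr line records for the guest.
     * Assumes GCC/Clang on x86/x86-64; names here are illustrative. */
    #include <cpuid.h>
    #include <stdio.h>

    #define MTRR_EDX_BIT  (1u << 12)   /* CPUID.01H:EDX[12] = MTRR (Intel SDM) */

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))   /* leaf 1 not supported */
        {
            fprintf(stderr, "CPUID leaf 1 not available\n");
            return 1;
        }
        printf("MTRR supported: %s\n", (edx & MTRR_EDX_BIT) ? "yes" : "no");
        return 0;
    }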
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (r98103 → r100935)

@@ -439,7 +439,8 @@
  * @param   pVCpu   The cross context per CPU structure.
  */
-VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu)
-{
-    RT_NOREF_PV(pVCpu);
+VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPUCC pVCpu)
+{
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrRead)
+        return pVCpu->cpum.s.GuestMsrs.msr.MtrrCap;
 
     /* This is currently a bit weird. :-) */
@@ -448,8 +449,10 @@
     bool const fFixedRangeRegisters = false;
     bool const fWriteCombiningType = false;
+    bool const fProcRsvdRangeRegisters = false;
     return cVariableRangeRegs
-         | (fFixedRangeRegisters ? RT_BIT_64(8) : 0)
-         | (fWriteCombiningType ? RT_BIT_64(10) : 0)
-         | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
+         | (fFixedRangeRegisters ? MSR_IA32_MTRR_CAP_FIX : 0)
+         | (fWriteCombiningType ? MSR_IA32_MTRR_CAP_WC : 0)
+         | (fSystemManagementRangeRegisters ? MSR_IA32_MTRR_CAP_SMRR : 0)
+         | (fProcRsvdRangeRegisters ? MSR_IA32_MTRR_CAP_PRMRR : 0);
 }
 
@@ -467,7 +470,17 @@
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo Implement variable MTRR storage. */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
-    *puValue = 0;
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrRead)
+    {
+        AssertLogRelMsgReturn(pRange->uValue < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(!(idMsr % 2),
+                              ("MTRR MSR (%#RX32) invalid, must be at even offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        *puValue = pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysBase;
+    }
+    else
+        *puValue = 0;
     return VINF_SUCCESS;
 }
@@ -480,6 +493,7 @@
      * Validate the value.
      */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(uRawValue); RT_NOREF_PV(pRange);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
+    RT_NOREF_PV(uRawValue);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
 
     uint8_t uType = uValue & 0xff;
@@ -501,5 +515,15 @@
      * Store it.
      */
-    /** @todo Implement variable MTRR storage. */
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrWrite)
+    {
+        AssertCompile(CPUMCTX_MAX_MTRRVAR_COUNT == RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs));
+        AssertLogRelMsgReturn(pRange->uValue < CPUMCTX_MAX_MTRRVAR_COUNT,
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(!(idMsr % 2),
+                              ("MTRR MSR (%#RX32) invalid, must be at even offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysBase = uValue;
+        /** @todo Act on the potential memory type change. */
+    }
     return VINF_SUCCESS;
 }
@@ -509,8 +533,18 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32MtrrPhysMaskN(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
-    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo Implement variable MTRR storage. */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
-    *puValue = 0;
+    RT_NOREF_PV(idMsr);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrRead)
+    {
+        AssertLogRelMsgReturn(pRange->uValue < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(idMsr % 2,
+                              ("MTRR MSR (%#RX32) invalid, must be at odd offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        *puValue = pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysMask;
+    }
+    else
+        *puValue = 0;
     return VINF_SUCCESS;
 }
@@ -523,6 +557,7 @@
      * Validate the value.
      */
-    Assert(pRange->uValue == (idMsr - 0x200) / 2);
+    Assert(pRange->uValue == (idMsr - MSR_IA32_MTRR_PHYSBASE0) / 2);
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(uRawValue); RT_NOREF_PV(pRange);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
 
     uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
@@ -537,5 +572,14 @@
      * Store it.
      */
-    /** @todo Implement variable MTRR storage. */
+    if (pVCpu->CTX_SUFF(pVM)->cpum.s.fMtrrWrite)
+    {
+        AssertLogRelMsgReturn(pRange->uValue < RT_ELEMENTS(pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs),
+                              ("MTRR MSR (%#RX32) out-of-bounds, must be <= %#RX32\n", idMsr, CPUMCTX_MAX_MTRRVAR_COUNT),
+                              VERR_CPUM_RAISE_GP_0);
+        AssertLogRelMsgReturn(idMsr % 2,
+                              ("MTRR MSR (%#RX32) invalid, must be at odd offset\n", idMsr), VERR_CPUM_RAISE_GP_0);
+        pVCpu->cpum.s.GuestMsrs.msr.aMtrrVarMsrs[pRange->uValue].MtrrPhysMask = uValue;
+        /** @todo Act on the potential memory type change. */
+    }
     return VINF_SUCCESS;
 }
@@ -547,4 +591,5 @@
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
     CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
     *puValue = *puFixedMtrr;
     return VINF_SUCCESS;
@@ -557,4 +602,5 @@
     CPUM_MSR_ASSERT_CPUMCPU_OFFSET_RETURN(pVCpu, pRange, uint64_t, puFixedMtrr);
     RT_NOREF_PV(idMsr); RT_NOREF_PV(uRawValue);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
 
     for (uint32_t cShift = 0; cShift < 63; cShift += 8)
@@ -577,4 +623,5 @@
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
     *puValue = pVCpu->cpum.s.GuestMsrs.msr.MtrrDefType;
     return VINF_SUCCESS;
@@ -586,6 +633,7 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.fMtrr);
 
-    uint8_t uType = uValue & 0xff;
+    uint8_t uType = uValue & MSR_IA32_MTRR_DEF_TYPE_DEF_MT_MASK;
     if ((uType >= 7) || (uType == 2) || (uType == 3))
     {
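The rewritten CPUMGetGuestIa32MtrrCap now either returns the stored guest MtrrCap value (when fMtrrRead is set) or synthesizes one from named MSR_IA32_MTRR_CAP_* bits instead of raw RT_BIT_64() positions. As a hedged illustration of that bit layout, the sketch below decodes an IA32_MTRRCAP value following the Intel SDM field positions (VCNT in bits 7:0, FIX bit 8, WC bit 10, SMRR bit 11, PRMRR bit 12); the MTRRCAP_* names and the example value are local stand-ins, not VirtualBox's definitions.

    /* Hedged sketch: decode an IA32_MTRRCAP value into its fields, using the
     * Intel SDM bit layout that the MSR_IA32_MTRR_CAP_* constants refer to.
     * The MTRRCAP_* names below are illustrative stand-ins. */
    #include <stdint.h>
    #include <stdio.h>

    #define MTRRCAP_VCNT_MASK  UINT64_C(0xff)        /* bits 7:0 - variable range count */
    #define MTRRCAP_FIX        (UINT64_C(1) << 8)    /* bit 8    - fixed range registers */
    #define MTRRCAP_WC         (UINT64_C(1) << 10)   /* bit 10   - write-combining type */
    #define MTRRCAP_SMRR       (UINT64_C(1) << 11)   /* bit 11   - SMRR supported */
    #define MTRRCAP_PRMRR      (UINT64_C(1) << 12)   /* bit 12   - PRMRR supported */

    static void DumpMtrrCap(uint64_t uMtrrCap)
    {
        printf("VCNT=%u FIX=%d WC=%d SMRR=%d PRMRR=%d\n",
               (unsigned)(uMtrrCap & MTRRCAP_VCNT_MASK),
               !!(uMtrrCap & MTRRCAP_FIX),
               !!(uMtrrCap & MTRRCAP_WC),
               !!(uMtrrCap & MTRRCAP_SMRR),
               !!(uMtrrCap & MTRRCAP_PRMRR));
    }

    int main(void)
    {
        /* Plausible example value: 8 variable ranges, fixed ranges and WC present. */
        DumpMtrrCap(UINT64_C(0x508));
        return 0;
    }

The variable-range accessors follow the same pattern as the cap register: the even MSR of each pair (IA32_MTRR_PHYSBASEn) is backed by MtrrPhysBase and the odd one (IA32_MTRR_PHYSMASKn) by MtrrPhysMask in aMtrrVarMsrs, guarded by the fMtrrRead/fMtrrWrite flags and bounds-checked against CPUMCTX_MAX_MTRRVAR_COUNT.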