Changeset 71755 in vbox
- Timestamp: Apr 9, 2018 8:10:23 AM (7 years ago)
- svn:sync-xref-src-repo-rev: 121857
- Location: trunk
- Files: 9 edited
trunk/include/VBox/vmm/cpum.h
r71643 → r71755

- The doxygen comments of the SVM query helpers now say "nested-guest VMCB" instead
  of "guest VMCB" (the ctrl/instruction, CR read, CR write, DR read, DR write and
  exception intercept checks, and the virtual-interrupt masking check).

- New inline helper added after CPUMIsGuestSvmVirtIntrMasking():

      /**
       * Checks if the nested-guest VMCB has nested-paging enabled.
       *
       * @returns @c true if nested-paging is enabled, @c false otherwise.
       * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
       * @param   pCtx    Pointer to the context.
       *
       * @remarks Should only be called when SVM feature is exposed to the guest.
       */
      DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
      {
          PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
          Assert(pVmcb);
          if (!pCtx->hwvirt.svm.fHMCachedVmcb)
              return pVmcb->ctrl.NestedPaging.n.u1NestedPaging;
          return HMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx);
      }

- New declaration next to CPUMSetGuestMsrEferNoCheck():

      VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
trunk/include/VBox/vmm/hm_svm.h
r71640 → r71755

- The SVMVMCBSTATESAVE field at offset 0x668 is renamed from u64GPAT ("G_PAT") to
  u64PAT ("PAT (Page Attribute Table) MSR"), and the matching
  AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64PAT, 0x668 - 0x400) is updated.

- The nested-guest VMCB cache gains a new member after the DBGCTL cache:

      /** Cache of the PAT MSR. */
      uint64_t        u64PAT;

- New declaration after HMIsGuestSvmVirtIntrMasking():

      VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
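The AssertCompileMemberOffset() checks above pin u64PAT at its architectural offset at
compile time, so the rename cannot silently change the VMCB layout. A minimal standalone
sketch of the same idea using C11 static_assert (the MINIVMCBSTATESAVE type below is
invented for illustration and is not the VirtualBox structure):

    #include <stddef.h>   /* offsetof */
    #include <stdint.h>
    #include <assert.h>   /* static_assert (C11) */

    /* Offsets are relative to the state-save area, which starts 0x400 bytes into
       the VMCB -- hence the "0x668 - 0x400" style used in hm_svm.h. */
    typedef struct MINIVMCBSTATESAVE
    {
        uint8_t  abUpToPat[0x668 - 0x400];  /* everything before the PAT field */
        uint64_t u64PAT;                    /* offset 0x668 - PAT MSR          */
        uint64_t u64DBGCTL;                 /* offset 0x670 - DBGCTL           */
    } MINIVMCBSTATESAVE;

    static_assert(offsetof(MINIVMCBSTATESAVE, u64PAT)    == 0x668 - 0x400, "u64PAT offset");
    static_assert(offsetof(MINIVMCBSTATESAVE, u64DBGCTL) == 0x670 - 0x400, "u64DBGCTL offset");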
trunk/include/iprt/x86.h
r70913 → r71755

- New constant added right after MSR_IA32_CR_PAT:

      /** Default PAT MSR value on processor powerup / reset (see Intel spec. 11.12.4
       *  "Programming the PAT", AMD spec. 7.8.2 "PAT Indexing") */
      #define MSR_IA32_CR_PAT_INIT_VAL            UINT64_C(0x0007040600070406)
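The reset value packs eight 3-bit memory types, one per byte (PA0 in bits 2:0 up to PA7
in bits 58:56), giving the architectural default pattern WB, WT, UC-, UC repeated twice.
A small standalone C program (illustration only, not part of the changeset) that decodes
the value:

    #include <stdint.h>
    #include <stdio.h>

    /* Map a 3-bit PAT memory-type encoding to its name. */
    static const char *patTypeName(uint8_t uType)
    {
        switch (uType)
        {
            case 0: return "UC  (uncacheable)";
            case 1: return "WC  (write combining)";
            case 4: return "WT  (write through)";
            case 5: return "WP  (write protected)";
            case 6: return "WB  (write back)";
            case 7: return "UC- (uncached, MTRR overridable)";
            default: return "reserved";
        }
    }

    int main(void)
    {
        uint64_t const uPat = UINT64_C(0x0007040600070406); /* MSR_IA32_CR_PAT_INIT_VAL */
        for (unsigned i = 0; i < 8; i++)
        {
            uint8_t const uType = (uint8_t)((uPat >> (i * 8)) & 0x7);
            printf("PA%u = %u (%s)\n", i, uType, patTypeName(uType));
        }
        return 0;
    }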
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r71108 → r71755

- The IA32_PAT write handler no longer open-codes the validation loop; it now calls the
  new CPUMIsPatMsrValid() and only commits the value on success:

      RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
      if (CPUMIsPatMsrValid(uValue))
      {
          pVCpu->cpum.s.Guest.msrPAT = uValue;
          return VINF_SUCCESS;
      }
      return VERR_CPUM_RAISE_GP_0;

- The validation loop moves into a new exported function:

      /**
       * Checks if a guest PAT MSR write is valid.
       *
       * @returns @c true if the PAT bit combination is valid, @c false otherwise.
       * @param   uValue  The PAT MSR value.
       */
      VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue)
      {
          for (uint32_t cShift = 0; cShift < 63; cShift += 8)
          {
              /* Check all eight bits because the top 5 bits of each byte are reserved. */
              uint8_t uType = (uint8_t)(uValue >> cShift);
              if ((uType >= 8) || (uType == 2) || (uType == 3))
              {
                  Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n",
                       cShift + 7, cShift, uValue, uType));
                  return false;
              }
          }
          return true;
      }
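In other words, a value is accepted only if every byte encodes one of the defined memory
types 0, 1, 4, 5, 6 or 7; types 2 and 3, or any byte with its reserved upper five bits
set, cause the WRMSR to fail with #GP(0). A standalone sketch of the same rule with a few
example inputs (the isPatValueValid() helper below is a local stand-in, not the VirtualBox
function):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in mirroring the rule: every byte of IA32_PAT must be a valid
       memory type, i.e. not 2, not 3 and below 8. */
    static bool isPatValueValid(uint64_t uValue)
    {
        for (unsigned iByte = 0; iByte < 8; iByte++)
        {
            uint8_t const uType = (uint8_t)(uValue >> (iByte * 8));
            if (uType >= 8 || uType == 2 || uType == 3)
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("%d\n", isPatValueValid(UINT64_C(0x0007040600070406))); /* 1: reset value is valid */
        printf("%d\n", isPatValueValid(UINT64_C(0x0007040600070402))); /* 0: PA0 = 2 is reserved  */
        printf("%d\n", isPatValueValid(UINT64_C(0x1007040600070406))); /* 0: reserved bit 60 set  */
        return 0;
    }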
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r71640 → r71755

- The doxygen comment of the VMCB-cache restore helper now documents its parameters
  ("@param pVCpu The cross context virtual CPU structure." and "@param pCtx Pointer to
  the guest-CPU context.").

- When restoring the cached nested-guest VMCB fields, the original PAT MSR is put back
  if the nested-hypervisor is not using nested-paging:

      /*
       * If the nested-hypervisor isn't using nested-paging (and thus shadow paging
       * is used by HM), we restore the original PAT MSR from the nested-guest VMCB.
       * Otherwise, the nested-guest-CPU PAT MSR would've already been saved here by
       * hardware-assisted SVM or by IEM.
       */
      if (!pNstGstVmcbCache->u1NestedPaging)
          pVmcbNstGstState->u64PAT = pNstGstVmcbCache->u64PAT;

- The comment about flagging a full CPU state change on trips to ring-3 is reworded:
  with nested-guests the state can change on the trip to ring-3 (e.g. an SVM_EXIT_INTR
  #VMEXIT for the nested-guest may be performed in ring-3), hence HM_CHANGED_ALL_GUEST
  is signalled here.

- The doxygen comments of the HMIsGuestSvm* intercept helpers and of
  HMIsGuestSvmVirtIntrMasking() now say "nested-guest VMCB" instead of "guest VMCB".

- New function added after HMIsGuestSvmVirtIntrMasking():

      /**
       * Checks if the nested-guest VMCB has nested-paging enabled.
       *
       * @returns true if nested-paging is enabled, @c false otherwise.
       * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
       * @param   pCtx    Pointer to the context.
       */
      VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
      {
          Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
          PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
          return RT_BOOL(pVmcbNstGstCache->u1NestedPaging);
      }
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
r71530 → r71755

- On #VMEXIT, the nested-guest PAT MSR is saved back into the VMCB state-save area when
  nested-paging is enabled (right after saving the CPL), and the stale
  "/** @todo NRIP. */" marker before "Save exit information" is dropped:

      if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
          pVmcbNstGstState->u64PAT = pCtx->msrPAT;

- In iemSvmVmrun(), the "/** @todo gPAT MSR validation? */" placeholder is replaced by an
  actual consistency check:

      /*
       * PAT (Page Attribute Table) MSR.
       *
       * The CPU only validates and loads it when nested-paging is enabled.
       * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
       */
      if (   pVmcbCtrl->NestedPaging.n.u1NestedPaging
          && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
      {
          Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
          return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
      }

- Later in VMRUN, after loading RIP and EFER, the nested-guest PAT is loaded into the
  guest-CPU context when nested-paging is enabled:

      pCtx->rip = pVmcbNstGst->u64RIP;
      CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
      if (pVmcbCtrl->NestedPaging.n.u1NestedPaging)
          pCtx->msrPAT = pVmcbNstGst->u64PAT;
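Taken together, the IEM changes make the PAT field of the nested-guest VMCB behave as the
AMD spec describes: it is only validated and loaded on VMRUN, and only written back on
#VMEXIT, when the nested-hypervisor enables nested paging. A condensed standalone sketch
of that control flow (helper and type names are hypothetical, not the actual IEM entry
points):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct GUESTCTX { uint64_t msrPAT; } GUESTCTX;  /* stand-in for the guest-CPU context */

    static bool isPatValueValid(uint64_t uPat)               /* same rule as CPUMIsPatMsrValid()  */
    {
        for (unsigned i = 0; i < 8; i++)
        {
            uint8_t const uType = (uint8_t)(uPat >> (i * 8));
            if (uType >= 8 || uType == 2 || uType == 3)
                return false;
        }
        return true;
    }

    /* VMRUN side: returns false for the consistency-check failure that the real code
       turns into a #VMEXIT with exit code SVM_EXIT_INVALID. */
    static bool vmrunLoadPat(bool fNestedPaging, uint64_t uVmcbPat, GUESTCTX *pCtx)
    {
        if (!fNestedPaging)
            return true;               /* PAT in the VMCB is neither checked nor loaded */
        if (!isPatValueValid(uVmcbPat))
            return false;              /* invalid guest state                           */
        pCtx->msrPAT = uVmcbPat;       /* nested-guest runs with its own PAT            */
        return true;
    }

    /* #VMEXIT side: only with nested paging is the guest PAT written back. */
    static void vmexitSavePat(bool fNestedPaging, GUESTCTX const *pCtx, uint64_t *puVmcbPat)
    {
        if (fNestedPaging)
            *puVmcbPat = pCtx->msrPAT;
    }

    int main(void)
    {
        GUESTCTX Ctx      = { UINT64_C(0x0007040600070406) };
        uint64_t uVmcbPat = UINT64_C(0x0606060606060606);
        if (vmrunLoadPat(true /* fNestedPaging */, uVmcbPat, &Ctx))
            printf("VMRUN ok, guest PAT = %#llx\n", (unsigned long long)Ctx.msrPAT);
        vmexitSavePat(true /* fNestedPaging */, &Ctx, &uVmcbPat);
        printf("VMCB PAT after #VMEXIT = %#llx\n", (unsigned long long)uVmcbPat);
        return 0;
    }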
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r71697 → r71755

- The VMCB PAT setup (applicable for Nested Paging only) now uses the architectural reset
  value instead of forcing write-back for all slots, and the comment explains why guest
  modifications are not honored:

      /*
       * Setup the PAT MSR (applicable for Nested Paging only).
       *
       * While guests can modify and see the modified values throug the shadow values,
       * we shall not honor any guest modifications of this MSR to ensure caching is always
       * enabled similar to how we always run with CR0.CD and NW bits cleared.
       */
      pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;

  (Previously: pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606); with a comment about
  treating all guest memory as WB.)

- A note is added to the guest MSR loading code after KernelGSBase:

      /* We don't honor guest modifications to its PAT MSR (similar to ignoring CR0.CD, NW bits). */

- The nested-guest VMCB cache now also caches the PAT MSR:

      pVmcbNstGstCache->u64PAT = pVmcbNstGstState->u64PAT;

- When setting up the nested-guest VMCB for hardware-assisted execution, the nested-guest
  PAT is overridden:

      /* Override nested-guest PAT MSR, see @bugref{7243#c109}. */
      pVmcbNstGst->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;

- The VMCB dump now logs guest.u64PAT instead of guest.u64GPAT.
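In the hardware-assisted path the guest's PAT writes are therefore only tracked in the
CPUM context (the shadow value the guest reads back via RDMSR), while the value the CPU
actually uses in guest mode stays pinned at the reset default. A toy sketch of that split
(names invented for illustration, not the HM code):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct PATSTATE
    {
        uint64_t uGuestVisible;   /* what the guest sees via RDMSR (CPUM context)          */
        uint64_t uHardware;       /* what is programmed into the VMCB for guest execution  */
    } PATSTATE;

    static void patWrmsr(PATSTATE *pState, uint64_t uNewValue)
    {
        pState->uGuestVisible = uNewValue;                        /* remembered...      */
        pState->uHardware     = UINT64_C(0x0007040600070406);     /* ...but not honored */
    }

    int main(void)
    {
        PATSTATE State = { UINT64_C(0x0007040600070406), UINT64_C(0x0007040600070406) };
        patWrmsr(&State, UINT64_C(0x0101010101010101));           /* guest asks for WC everywhere */
        printf("guest-visible PAT: %#llx\n", (unsigned long long)State.uGuestVisible);
        printf("hardware PAT:      %#llx\n", (unsigned long long)State.uHardware);
        return 0;
    }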
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r71443 → r71755

- The initial guest PAT MSR value now uses the new constant, replacing the hard-coded
  UINT64_C(0x0007040600070406) and its "/** @todo correct? */" marker:

      pCtx->msrPAT = MSR_IA32_CR_PAT_INIT_VAL;

- The VMCB state-save debug dump prints "u64PAT" instead of "u64GPAT".
trunk/src/VBox/VMM/VMMR3/HM.cpp
r71415 → r71755

- A todo is added to the saved-state writer:

      /** @todo We need to save SVMNESTEDVMCBCACHE (if pCtx fHMCached is true as we
       *        are in nested-geust execution and the cache contains pristine
       *        fields that we only restore on #VMEXIT and not on
       *        every exit-to-ring 3. */

- In the saved-state loader, Log(("hmR3Load:\n")) becomes
  LogFlowFunc(("uVersion=%u\n", uVersion)).