Changeset 71755 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: Apr 9, 2018 8:10:23 AM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     3 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
Diff r71108 → r71755:

@@ old 551-569, new 551-560 @@
 {
     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
-
-    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    if (CPUMIsPatMsrValid(uValue))
     {
-        /* Check all eight bits because the top 5 bits of each byte are reserved. */
-        uint8_t uType = (uint8_t)(uValue >> cShift);
-        if ((uType >= 8) || (uType == 2) || (uType == 3))
-        {
-            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n",
-                 cShift + 7, cShift, uValue, uType));
-            return VERR_CPUM_RAISE_GP_0;
-        }
+        pVCpu->cpum.s.Guest.msrPAT = uValue;
+        return VINF_SUCCESS;
     }
-
-    pVCpu->cpum.s.Guest.msrPAT = uValue;
-    return VINF_SUCCESS;
+    return VERR_CPUM_RAISE_GP_0;
 }

@@ old 6209-6212, new 6200-6225 @@
 
 /**
+ * Checks if a guest PAT MSR write is valid.
+ *
+ * @returns @c true if the PAT bit combination is valid, @c false otherwise.
+ * @param   uValue      The PAT MSR value.
+ */
+VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue)
+{
+    for (uint32_t cShift = 0; cShift < 63; cShift += 8)
+    {
+        /* Check all eight bits because the top 5 bits of each byte are reserved. */
+        uint8_t uType = (uint8_t)(uValue >> cShift);
+        if ((uType >= 8) || (uType == 2) || (uType == 3))
+        {
+            Log(("CPUM: Invalid PAT type at %u:%u in IA32_PAT: %#llx (%#llx)\n", cShift + 7, cShift, uValue, uType));
+            return false;
+        }
+    }
+    return true;
+}
+
+
+/**
  * Validates an EFER MSR write.
  *
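The rule that CPUMIsPatMsrValid() now centralizes is small enough to restate on its own: IA32_PAT holds eight 8-bit entries, only memory types 0 (UC), 1 (WC), 4 (WT), 5 (WP), 6 (WB) and 7 (UC-) are valid, and because the upper five bits of each entry are reserved the whole byte is checked. The following is a minimal standalone sketch of that check, not VBox code; the helper name isPatValueValid and the test values are illustrative only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical standalone restatement of the PAT check above; the real
       helper is CPUMIsPatMsrValid() in CPUMAllMsrs.cpp. */
    static bool isPatValueValid(uint64_t uPat)
    {
        for (unsigned iEntry = 0; iEntry < 8; iEntry++)
        {
            /* Each byte is one PAT entry; types 2, 3 and anything >= 8 are reserved. */
            uint8_t uType = (uint8_t)(uPat >> (iEntry * 8));
            if (uType >= 8 || uType == 2 || uType == 3)
                return false;
        }
        return true;
    }

    int main(void)
    {
        /* 0x0007040600070406 is the architectural power-on value of IA32_PAT. */
        printf("%d\n", isPatValueValid(UINT64_C(0x0007040600070406)));  /* 1 */
        printf("%d\n", isPatValueValid(UINT64_C(0x0000000000000003)));  /* 0: type 3 is reserved */
        return 0;
    }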
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
Diff r71640 → r71755:

@@ old 132-137, new 132-137 @@
  * in IEM).
  *
- * @param pVCpu
- * @param pCtx
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @sa hmR0SvmVmRunCacheVmcb.

@@ old 169-172, new 169-182 @@
     pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = pNstGstVmcbCache->fVIntrMasking;
     pVmcbNstGstCtrl->TLBCtrl = pNstGstVmcbCache->TLBCtrl;
+
+    /*
+     * If the nested-hypervisor isn't using nested-paging (and thus shadow paging
+     * is used by HM), we restore the original PAT MSR from the nested-guest VMCB.
+     * Otherwise, the nested-guest-CPU PAT MSR would've already been saved here by
+     * hardware-assisted SVM or by IEM.
+     */
+    if (!pNstGstVmcbCache->u1NestedPaging)
+        pVmcbNstGstState->u64PAT = pNstGstVmcbCache->u64PAT;
+
     pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pNstGstVmcbCache->u1NestedPaging;
     pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pNstGstVmcbCache->u1LbrVirt;

@@ old 177-185, new 187-196 @@
  * Currently, VMRUN, #VMEXIT transitions involves trips to ring-3 that would flag a full
  * CPU state change. However, if we exit to ring-3 in response to receiving a physical
- * interrupt, we skip signaling any CPU state change as normally no change
- * is done to the execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
- * However, with nested-guests, the state can change for e.g., we might perform a
- * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU
- * state change here.
+ * interrupt, we skip signaling any CPU state change as normally no change is done to the
+ * execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
+ *
+ * With nested-guests, the state can change on trip to ring-3 for e.g., we might perform a
+ * SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU state
+ * change here.
  */
 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);

@@ old 440-443, new 451-455 @@
 /**
- * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
+ * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
+ * active.
  *
  * @returns @c true if in intercept is set, @c false otherwise.

The same "guest VMCB" → "nested-guest VMCB" rewording is applied to the doc comments of the CR read, CR write, DR read, DR write and exception intercept checks and of the virtual-interrupts masking check (old lines 458-545, new lines 470-555).

@@ old 554-557, new 564-582 @@
     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     return pVmcbNstGstCache->fVIntrMasking;
+}
+
+
+/**
+ * Checks if the nested-guest VMCB has nested-paging enabled.
+ *
+ * @returns true if nested-paging is enabled, @c false otherwise.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   pCtx        Pointer to the context.
+ */
+VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
+    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    return RT_BOOL(pVmcbNstGstCache->u1NestedPaging);
 }
 
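The hunk that restores u64PAT on #VMEXIT follows the cache-and-restore pattern used for the other cached VMCB fields: when the nested hypervisor does not enable nested paging, HM runs the nested-guest with shadow paging and may have replaced the gPAT field in the VMCB, so the value cached at VMRUN has to be written back on #VMEXIT; with nested paging the field already holds the nested-guest value. A minimal sketch of that pattern under those assumptions, with hypothetical pared-down structures standing in for the VMCB state-save area and the nested-guest VMCB cache:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the VMCB state-save area and the HM
       nested-guest VMCB cache; the real structures are VBox internals. */
    typedef struct { uint64_t u64PAT; } VMCBSTATE;
    typedef struct { uint64_t u64PAT; bool fNestedPaging; } VMCBCACHE;

    /* At VMRUN: remember the nested-guest's gPAT before HM may overwrite it
       with the PAT value it needs for shadow paging. */
    static void cacheVmcbAtVmrun(VMCBCACHE *pCache, const VMCBSTATE *pVmcbNstGst, bool fNestedPaging)
    {
        pCache->u64PAT        = pVmcbNstGst->u64PAT;
        pCache->fNestedPaging = fNestedPaging;
    }

    /* At #VMEXIT: if the nested hypervisor did not enable nested paging, the
       gPAT currently in the VMCB is HM's shadow-paging value, so restore the
       original; with nested paging the field already holds the guest value. */
    static void restoreVmcbAtVmexit(const VMCBCACHE *pCache, VMCBSTATE *pVmcbNstGst)
    {
        if (!pCache->fNestedPaging)
            pVmcbNstGst->u64PAT = pCache->u64PAT;
    }

    int main(void)
    {
        VMCBSTATE Vmcb = { UINT64_C(0x0007040600070406) };   /* nested-guest gPAT */
        VMCBCACHE Cache;
        cacheVmcbAtVmrun(&Cache, &Vmcb, false /* no nested paging */);
        Vmcb.u64PAT = UINT64_C(0x0606060606060606);          /* HM's shadow-paging PAT */
        restoreVmcbAtVmexit(&Cache, &Vmcb);                  /* guest value comes back */
        return Vmcb.u64PAT == UINT64_C(0x0007040600070406) ? 0 : 1;
    }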
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
Diff r71530 → r71755:

@@ old 152-155, new 152-157 @@
     pVmcbNstGstState->u8CPL = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
     Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);
+    if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
+        pVmcbNstGstState->u64PAT = pCtx->msrPAT;
 
     PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;

@@ old 180-185, new 182-185 @@
     else
         pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
-
-    /** @todo NRIP. */
 
     /* Save exit information. */

@@ old 459-463, new 459-474 @@
     }
 
-    /** @todo gPAT MSR validation? */
+    /*
+     * PAT (Page Attribute Table) MSR.
+     *
+     * The CPU only validates and loads it when nested-paging is enabled.
+     * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
+     */
+    if (   pVmcbCtrl->NestedPaging.n.u1NestedPaging
+        && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
+    {
+        Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
+        return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
 
     /*

@@ old 614-617, new 625-630 @@
     pCtx->rip = pVmcbNstGst->u64RIP;
     CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
+    if (pVmcbCtrl->NestedPaging.n.u1NestedPaging)
+        pCtx->msrPAT = pVmcbNstGst->u64PAT;
 
     /* Mask DR6, DR7 bits mandatory set/clear bits. */
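The VMRUN-side changes above boil down to one rule: the gPAT field of the nested-guest VMCB is consistency-checked and loaded into the guest's IA32_PAT only when nested paging is enabled, and is otherwise ignored (AMD spec. 15.25.4). A small self-contained sketch of that rule; the function names and the -1/#VMEXIT mapping are illustrative, not the IEM code itself:

    #include <stdbool.h>
    #include <stdint.h>

    /* Same per-entry PAT validity rule as in CPUMIsPatMsrValid(). */
    static bool isPatEntryValid(uint8_t uType)
    {
        return uType < 8 && uType != 2 && uType != 3;
    }

    /* Hypothetical skeleton: validate and load gPAT only when nested paging is
       enabled. Returns 0 on success, -1 where the real code raises
       #VMEXIT(SVM_EXIT_INVALID). */
    static int vmrunLoadGuestPat(bool fNestedPaging, uint64_t u64VmcbPat, uint64_t *puGuestPatMsr)
    {
        if (fNestedPaging)
        {
            for (unsigned iEntry = 0; iEntry < 8; iEntry++)
                if (!isPatEntryValid((uint8_t)(u64VmcbPat >> (iEntry * 8))))
                    return -1;
            *puGuestPatMsr = u64VmcbPat;    /* load gPAT into the guest's IA32_PAT */
        }
        /* Without nested paging the gPAT field is neither checked nor loaded. */
        return 0;
    }

    int main(void)
    {
        uint64_t uGuestPat = 0;
        return vmrunLoadGuestPat(true, UINT64_C(0x0007040600070406), &uGuestPat);
    }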