Changeset metadata:
- Timestamp: Dec 21, 2017 6:52:11 AM (7 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited

Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r70178 r70258 363 363 pVmcbNstGstState->u64CR4 = pNstGstVmcbCache->u64CR4; 364 364 pVmcbNstGstState->u64EFER = pNstGstVmcbCache->u64EFER; 365 pVmcbNstGstCtrl->u 64VmcbCleanBits = pNstGstVmcbCache->u64VmcbCleanBits;365 pVmcbNstGstCtrl->u32VmcbCleanBits = pNstGstVmcbCache->u32VmcbCleanBits; 366 366 pVmcbNstGstCtrl->u64IOPMPhysAddr = pNstGstVmcbCache->u64IOPMPhysAddr; 367 367 pVmcbNstGstCtrl->u64MSRPMPhysAddr = pNstGstVmcbCache->u64MSRPMPhysAddr; -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r70254 r70258 777 777 ASMBitClear(pbMsrBitmap, uMsrpmBit + 1); 778 778 779 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;779 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM; 780 780 } 781 781 … … 862 862 863 863 /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from the VMCB in memory. */ 864 pVmcb->ctrl.u 64VmcbCleanBits = 0;864 pVmcb->ctrl.u32VmcbCleanBits = 0; 865 865 866 866 /* The host ASID MBZ, for the guest start with 1. */ … … 1049 1049 1050 1050 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */ 1051 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;1051 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP; 1052 1052 1053 1053 /* Keep track of last CPU ID even when flushing all the time. */ … … 1061 1061 { 1062 1062 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */ 1063 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;1063 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP; 1064 1064 1065 1065 if (fNewAsid) … … 1102 1102 { 1103 1103 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid; 1104 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;1104 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID; 1105 1105 } 1106 1106 … … 1223 1223 { 1224 1224 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(u32Xcpt); 1225 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;1225 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 1226 1226 } 1227 1227 } … … 1263 1263 { 1264 1264 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(u32Xcpt); 1265 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;1265 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 1266 1266 } 1267 1267 } … … 1338 1338 1339 1339 pVmcb->guest.u64CR0 = u64GuestCR0; 1340 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;1340 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1341 1341 } 1342 1342 … … 1362 1362 { 1363 1363 
pVmcb->guest.u64CR2 = pCtx->cr2; 1364 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;1364 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2; 1365 1365 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2); 1366 1366 } … … 1382 1382 1383 1383 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode); 1384 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;1384 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP; 1385 1385 Assert(pVmcb->ctrl.u64NestedPagingCR3); 1386 1386 pVmcb->guest.u64CR3 = pCtx->cr3; … … 1392 1392 } 1393 1393 1394 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;1394 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1395 1395 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3); 1396 1396 } … … 1439 1439 1440 1440 pVmcb->guest.u64CR4 = u64GuestCR4; 1441 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;1441 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1442 1442 1443 1443 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. 
*/ … … 1474 1474 1475 1475 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl; 1476 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;1476 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG; 1477 1477 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); 1478 1478 } … … 1497 1497 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt; 1498 1498 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt; 1499 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;1499 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT; 1500 1500 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR); 1501 1501 } … … 1506 1506 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt; 1507 1507 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt; 1508 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;1508 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT; 1509 1509 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR); 1510 1510 } … … 1536 1536 { 1537 1537 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME; 1538 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;1538 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1539 1539 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR); 1540 1540 } … … 1552 1552 { 1553 1553 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME; 1554 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;1554 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1555 1555 } 1556 1556 } … … 1629 1629 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu); 1630 1630 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL; 1631 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;1631 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX; 1632 1632 pVCpu->hm.s.fUsingHyperDR7 = true; 1633 1633 } … … 1648 1648 pVmcb->guest.u64DR7 = pCtx->dr[7]; 1649 1649 pVmcb->guest.u64DR6 = pCtx->dr[6]; 1650 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;1650 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX; 1651 1651 pVCpu->hm.s.fUsingHyperDR7 = false; 1652 1652 } … … 1705 1705 pVmcb->ctrl.u16InterceptRdDRx = 0xffff; 1706 
1706 pVmcb->ctrl.u16InterceptWrDRx = 0xffff; 1707 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;1707 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 1708 1708 } 1709 1709 } … … 1715 1715 pVmcb->ctrl.u16InterceptRdDRx = 0; 1716 1716 pVmcb->ctrl.u16InterceptWrDRx = 0; 1717 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;1717 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 1718 1718 } 1719 1719 } … … 1736 1736 pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking = 1; 1737 1737 pVCpu->hm.s.svm.fSyncVTpr = false; 1738 pVmcbNstGst->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;1738 pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR; 1739 1739 1740 1740 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE); … … 1800 1800 } 1801 1801 1802 pVmcb->ctrl.u 64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);1802 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR); 1803 1803 } 1804 1804 } … … 1897 1897 1898 1898 /* Finally, update the VMCB clean bits. */ 1899 pVmcbNstGst->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;1899 pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 1900 1900 1901 1901 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS)); … … 2157 2157 pNstGstVmcbCache->u64MSRPMPhysAddr = pVmcbNstGstCtrl->u64MSRPMPhysAddr; 2158 2158 pNstGstVmcbCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset; 2159 pNstGstVmcbCache->u 64VmcbCleanBits = pVmcbNstGstCtrl->u64VmcbCleanBits;2159 pNstGstVmcbCache->u32VmcbCleanBits = pVmcbNstGstCtrl->u32VmcbCleanBits; 2160 2160 pNstGstVmcbCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking; 2161 2161 pNstGstVmcbCache->TLBCtrl = pVmcbNstGstCtrl->TLBCtrl; … … 2760 2760 2761 2761 /* Finally update the VMCB clean bits since we touched the intercepts as well as the TSC offset. 
*/ 2762 pVmcbNstGstCtrl->u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;2762 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 2763 2763 2764 2764 if (fParavirtTsc) … … 2798 2798 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); 2799 2799 } 2800 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;2800 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 2801 2801 2802 2802 /** @todo later optimize this to be done elsewhere and not before every … … 3146 3146 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1; 3147 3147 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR; 3148 pVmcb->ctrl.u 64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);3148 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR); 3149 3149 Log4(("Set VINTR intercept\n")); 3150 3150 } … … 3166 3166 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0; 3167 3167 pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR; 3168 pVmcb->ctrl.u 64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);3168 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR); 3169 3169 Log4(("Cleared VINTR intercept\n")); 3170 3170 } … … 3184 3184 { 3185 3185 pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET; 3186 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;3186 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 3187 3187 3188 3188 Log4(("Setting IRET intercept\n")); … … 3201 3201 { 3202 3202 pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET; 3203 pVmcb->ctrl.u 64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);3203 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS); 3204 3204 3205 3205 Log4(("Clearing IRET intercept\n")); … … 3520 3520 hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM); 3521 3521 #ifdef VBOX_STRICT 3522 Log4(("ctrl.u 64VmcbCleanBits %#RX64\n", pVmcb->ctrl.u64VmcbCleanBits));3522 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", 
pVmcb->ctrl.u32VmcbCleanBits)); 3523 3523 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx)); 3524 3524 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx)); … … 4033 4033 /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */ 4034 4034 if (idCurrentCpu != pVCpu->hm.s.idLastCpu) 4035 pVmcbNstGst->ctrl.u 64VmcbCleanBits = 0;4035 pVmcbNstGst->ctrl.u32VmcbCleanBits = 0; 4036 4036 4037 4037 /* Store status of the shared guest-host state at the time of VMRUN. */ … … 4089 4089 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx); 4090 4090 if (!fSupportsVmcbCleanBits) 4091 pVmcbNstGst->ctrl.u 64VmcbCleanBits = 0;4091 pVmcbNstGst->ctrl.u32VmcbCleanBits = 0; 4092 4092 } 4093 4093 #endif … … 4145 4145 /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */ 4146 4146 if (idCurrentCpu != pVCpu->hm.s.idLastCpu) 4147 pVmcb->ctrl.u 64VmcbCleanBits = 0;4147 pVmcb->ctrl.u32VmcbCleanBits = 0; 4148 4148 4149 4149 /* Store status of the shared guest-host state at the time of VMRUN. */ … … 4198 4198 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx); 4199 4199 if (!fSupportsVmcbCleanBits) 4200 pVmcb->ctrl.u 64VmcbCleanBits = 0;4200 pVmcb->ctrl.u32VmcbCleanBits = 0; 4201 4201 } 4202 4202 … … 4310 4310 4311 4311 /* Mark the VMCB-state cache as unmodified by VMM. */ 4312 pVmcbNstGstCtrl->u 64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;4312 pVmcbNstGstCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; 4313 4313 4314 4314 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */ … … 4354 4354 4355 4355 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; 4356 pVmcb->ctrl.u 64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */4356 pVmcb->ctrl.u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */ 4357 4357 4358 4358 /* TSC read must be done early for maximum accuracy. 
*/ … … 6512 6512 pVmcb->ctrl.u16InterceptRdDRx = 0; 6513 6513 pVmcb->ctrl.u16InterceptWrDRx = 0; 6514 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;6514 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 6515 6515 6516 6516 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */ … … 6763 6763 pVmcb->guest.u64DR6 = pCtx->dr[6]; 6764 6764 pVmcb->guest.u64DR7 = pCtx->dr[7]; 6765 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;6765 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX; 6766 6766 hmR0SvmSetPendingXcptDB(pVCpu); 6767 6767 } … … 7323 7323 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc)); 7324 7324 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL; 7325 pVmcb->ctrl.u 64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;7325 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX; 7326 7326 } 7327 7327 else -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r70254 r70258 3667 3667 pHlp->pfnPrintf(pHlp, "%su64NestedPagingCR3 = %#RX64\n", pszPrefix, pVmcbCtrl->u64NestedPagingCR3); 3668 3668 pHlp->pfnPrintf(pHlp, "%su64LBRVirt = %#RX64\n", pszPrefix, pVmcbCtrl->u64LBRVirt); 3669 pHlp->pfnPrintf(pHlp, "%su 64VmcbCleanBits = %#RX64\n", pszPrefix, pVmcbCtrl->u64VmcbCleanBits);3669 pHlp->pfnPrintf(pHlp, "%su32VmcbCleanBits = %#RX32\n", pszPrefix, pVmcbCtrl->u32VmcbCleanBits); 3670 3670 pHlp->pfnPrintf(pHlp, "%su64NextRIP = %#RX64\n", pszPrefix, pVmcbCtrl->u64NextRIP); 3671 3671 pHlp->pfnPrintf(pHlp, "%scbInstrFetched = %u\n", pszPrefix, pVmcbCtrl->cbInstrFetched);
Note: See TracChangeset for help on using the changeset viewer.