- Timestamp: Nov 28, 2013 5:30:55 PM (11 years ago)
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r49700 → r49701

@@ -209,6 +209,4 @@
     uint32_t                    u32Alignment0;
 #endif
-    /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
-    uint64_t                    u64LStarMsr;
     /** The guest's TPR value used for TPR shadowing. */
     uint8_t                     u8GuestTpr;

@@ -1287,4 +1285,6 @@
         && fUpdateHostMsr)
     {
+        Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
         pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
     }

@@ -1412,20 +1412,14 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-#define VMXLOCAL_SAVE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
-    do { \
-        if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag))) \
-        { \
-            pVCpu->hm.s.vmx.u64Host##a_HostMsrField = ASMRdMsr(uMsr); \
-        } \
-    } while (0)
-
     /*
      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
      */
-    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
-    VMXLOCAL_SAVE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
-    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
-    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
-
-#undef VMXLOCAL_SAVE_HOST_MSR
+    if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
+    {
+        pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
+        pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
+        pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
+        pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_SAVED_HOST;
+    }
 }

@@ -1470,18 +1464,11 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 
-#define VMXLOCAL_SAVE_GUEST_MSR(uMsr, a_GuestMsrField, RestoreFlag) \
-    do { \
-        if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
-        { \
-            pMixedCtx->msr##a_GuestMsrField = ASMRdMsr(uMsr); \
-            Log4(("hmR0VmxLazySaveGuestMsrs: uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
-        } \
-    } while (0)
-
-    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        VMX_RESTORE_HOST_MSR_LSTAR);
-    VMXLOCAL_SAVE_GUEST_MSR(MSR_K6_STAR,           STAR,         VMX_RESTORE_HOST_MSR_STAR);
-    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       VMX_RESTORE_HOST_MSR_SFMASK);
-    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
-
-#undef VMXLOCAL_SAVE_GUEST_MSR
+    if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
+    {
+        Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
+        pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
+        pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
+        pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
+        pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+    }
 }

@@ -1508,20 +1495,30 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 
-#define VMXLOCAL_LOAD_GUEST_MSR(uMsr, a_GuestMsrField, a_HostMsrField, RestoreFlag) \
+#if 0 /* Disabled until issue with non-atomic flag updates is resolved. See @bugref{6398#c170}. */
+    Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
+    if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
+    {
+#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
     do { \
-        if (   (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
-            || pMixedCtx->msr##a_GuestMsrField != pVCpu->hm.s.vmx.u64Host##a_HostMsrField) \
-        { \
-            ASMWrMsr((uMsr), pMixedCtx->msr##a_GuestMsrField); \
-        } \
-        pVCpu->hm.s.vmx.fRestoreHostMsrs |= (RestoreFlag); \
-        Log4(("Load: MSRSWAP uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
+        if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
+            ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
+        else \
+            Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
     } while (0)
 
-    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
-    VMXLOCAL_LOAD_GUEST_MSR(MSR_K6_STAR,           STAR,         StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
-    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
-    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
-
-#undef VMXLOCAL_LOAD_GUEST_MSR
+        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        LStar);
+        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR,           STAR,         Star);
+        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       SFMask);
+        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
+    }
+    else
+#endif
+    {
+        ASMWrMsr(MSR_K8_LSTAR,          pMixedCtx->msrLSTAR);
+        ASMWrMsr(MSR_K6_STAR,           pMixedCtx->msrSTAR);
+        ASMWrMsr(MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK);
+        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
+    }
+    pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_LOADED_GUEST;
 }

@@ -1543,21 +1540,13 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 
-#define VMXLOCAL_RESTORE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
-    do { \
-        if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
-        { \
-            ASMWrMsr((uMsr), pVCpu->hm.s.vmx.u64Host##a_HostMsrField); \
-            pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(RestoreFlag); \
-            Log4(("hmR0VmxLazyRestoreHostMsrs: uMsr=%#RX32 HostValue=%#RX64\n", (uMsr), \
-                  pVCpu->hm.s.vmx.u64Host##a_HostMsrField)); \
-        } \
-    } while (0)
-
-    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
-    VMXLOCAL_RESTORE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
-    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
-    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
-
-#undef VMXLOCAL_RESTORE_HOST_MSR
+    if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
+    {
+        Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
+        ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
+        ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
+        ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
+        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+    }
+    pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(VMX_RESTORE_HOST_MSR_LOADED_GUEST | VMX_RESTORE_HOST_MSR_SAVED_HOST);
 }
 #endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */

@@ -4411,5 +4400,5 @@
 
         VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
-        Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n",
+        Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
     }
 

@@ -4476,5 +4465,5 @@
         PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
         for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
-            Log4((" MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
+            Log4(("Load: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
 # endif
 #endif
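The change replaces the per-MSR restore flags (VMX_RESTORE_HOST_MSR_LSTAR and friends) with two combined flags, VMX_RESTORE_HOST_MSR_SAVED_HOST and VMX_RESTORE_HOST_MSR_LOADED_GUEST, which track whether the host MSR values have been captured and whether the guest values are currently loaded in the hardware MSRs. The following standalone sketch walks that flag protocol through one world-switch cycle. It is not VirtualBox code: the SKETCH* names, the four-entry MSR array and the sketchRdMsr/sketchWrMsr stand-ins are hypothetical, and only the SAVED_HOST/LOADED_GUEST bookkeeping mirrors the functions touched by this revision.

/*
 * Minimal standalone sketch (not VirtualBox code) of the lazy-MSR flag protocol
 * introduced in r49701.  VBox types and MSR intrinsics are replaced with
 * hypothetical stand-ins; only the SAVED_HOST / LOADED_GUEST bookkeeping
 * mirrors the changeset.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RESTORE_HOST_MSR_SAVED_HOST    UINT32_C(0x1)  /* host values captured */
#define SKETCH_RESTORE_HOST_MSR_LOADED_GUEST  UINT32_C(0x2)  /* guest values live in the MSRs */

/* Stand-in for the physical LSTAR/STAR/SFMASK/KERNEL_GS_BASE registers. */
static uint64_t g_auHwMsr[4] = { 0x100, 0x200, 0x300, 0x400 };

static uint64_t sketchRdMsr(unsigned i)             { return g_auHwMsr[i]; }
static void     sketchWrMsr(unsigned i, uint64_t u) { g_auHwMsr[i] = u; }

typedef struct SKETCHVCPU
{
    uint32_t fRestoreHostMsrs;   /* mirrors pVCpu->hm.s.vmx.fRestoreHostMsrs */
    uint64_t au64HostMsr[4];     /* mirrors the u64Host*Msr fields */
    uint64_t au64GuestMsr[4];    /* mirrors the pMixedCtx->msr* fields */
} SKETCHVCPU;

/* Like hmR0VmxLazySaveHostMsrs: capture host values once per world-switch cycle. */
static void sketchLazySaveHostMsrs(SKETCHVCPU *pVCpu)
{
    if (!(pVCpu->fRestoreHostMsrs & SKETCH_RESTORE_HOST_MSR_LOADED_GUEST))
    {
        for (unsigned i = 0; i < 4; i++)
            pVCpu->au64HostMsr[i] = sketchRdMsr(i);
        pVCpu->fRestoreHostMsrs |= SKETCH_RESTORE_HOST_MSR_SAVED_HOST;
    }
}

/* Like hmR0VmxLazyLoadGuestMsrs: write the guest values (the unconditional path). */
static void sketchLazyLoadGuestMsrs(SKETCHVCPU *pVCpu)
{
    for (unsigned i = 0; i < 4; i++)
        sketchWrMsr(i, pVCpu->au64GuestMsr[i]);
    pVCpu->fRestoreHostMsrs |= SKETCH_RESTORE_HOST_MSR_LOADED_GUEST;
}

/* Like hmR0VmxLazySaveGuestMsrs: read guest values back only if they are in the MSRs. */
static void sketchLazySaveGuestMsrs(SKETCHVCPU *pVCpu)
{
    if (pVCpu->fRestoreHostMsrs & SKETCH_RESTORE_HOST_MSR_LOADED_GUEST)
    {
        assert(pVCpu->fRestoreHostMsrs & SKETCH_RESTORE_HOST_MSR_SAVED_HOST);
        for (unsigned i = 0; i < 4; i++)
            pVCpu->au64GuestMsr[i] = sketchRdMsr(i);
    }
}

/* Like hmR0VmxLazyRestoreHostMsrs: put the host values back and clear both flags. */
static void sketchLazyRestoreHostMsrs(SKETCHVCPU *pVCpu)
{
    if (pVCpu->fRestoreHostMsrs & SKETCH_RESTORE_HOST_MSR_LOADED_GUEST)
    {
        assert(pVCpu->fRestoreHostMsrs & SKETCH_RESTORE_HOST_MSR_SAVED_HOST);
        for (unsigned i = 0; i < 4; i++)
            sketchWrMsr(i, pVCpu->au64HostMsr[i]);
    }
    pVCpu->fRestoreHostMsrs &= ~(SKETCH_RESTORE_HOST_MSR_LOADED_GUEST | SKETCH_RESTORE_HOST_MSR_SAVED_HOST);
}

int main(void)
{
    SKETCHVCPU VCpu = { 0, { 0 }, { 0xAAA, 0xBBB, 0xCCC, 0xDDD } };

    sketchLazySaveHostMsrs(&VCpu);     /* before loading guest state */
    sketchLazyLoadGuestMsrs(&VCpu);    /* guest values now live in the MSRs */
    sketchLazySaveGuestMsrs(&VCpu);    /* when the guest context is synced back */
    sketchLazyRestoreHostMsrs(&VCpu);  /* on return to ring-3 / preemption */

    assert(sketchRdMsr(0) == 0x100 && sketchRdMsr(3) == 0x400);        /* host values restored */
    printf("fRestoreHostMsrs=%#x\n", (unsigned)VCpu.fRestoreHostMsrs); /* prints 0 */
    return 0;
}

As the #if 0 block in hmR0VmxLazyLoadGuestMsrs shows, the per-MSR compare-before-write optimisation is parked until the non-atomic flag-update issue (@bugref{6398#c170}) is resolved, so the sketch models only the unconditional write path that the revision enables.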