- Timestamp: Aug 15, 2013 12:57:02 PM
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r47652 r47760 86 86 /** @name Ring-0 method table for AMD-V and VT-x specific operations. 87 87 * @{ */ 88 DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)); 89 DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 90 DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu)); 91 DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 92 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 93 DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, 94 bool fEnabledByHost)); 95 DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)); 96 DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM)); 97 DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM)); 98 DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM)); 88 DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)); 89 DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 90 DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback,(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)); 91 DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu)); 92 DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 93 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 94 DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, 95 bool fEnabledByHost)); 96 DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)); 97 DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM)); 98 DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM)); 99 DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM)); 99 100 /** @} */ 100 101 … … 251 252 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); 252 253 return VINF_SUCCESS; 254 } 255 256 static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit) 257 { 258 NOREF(enmEvent); NOREF(pVCpu); NOREF(fGlobalInit); 253 259 } 254 260 … … 517 523 * Install the VT-x methods. 518 524 */ 519 g_HvmR0.pfnEnterSession = VMXR0Enter; 520 g_HvmR0.pfnLeaveSession = VMXR0Leave; 521 g_HvmR0.pfnSaveHostState = VMXR0SaveHostState; 522 g_HvmR0.pfnLoadGuestState = VMXR0LoadGuestState; 523 g_HvmR0.pfnRunGuestCode = VMXR0RunGuestCode; 524 g_HvmR0.pfnEnableCpu = VMXR0EnableCpu; 525 g_HvmR0.pfnDisableCpu = VMXR0DisableCpu; 526 g_HvmR0.pfnInitVM = VMXR0InitVM; 527 g_HvmR0.pfnTermVM = VMXR0TermVM; 528 g_HvmR0.pfnSetupVM = VMXR0SetupVM; 525 g_HvmR0.pfnEnterSession = VMXR0Enter; 526 g_HvmR0.pfnLeaveSession = VMXR0Leave; 527 g_HvmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback; 528 g_HvmR0.pfnSaveHostState = VMXR0SaveHostState; 529 g_HvmR0.pfnLoadGuestState = VMXR0LoadGuestState; 530 g_HvmR0.pfnRunGuestCode = VMXR0RunGuestCode; 531 g_HvmR0.pfnEnableCpu = VMXR0EnableCpu; 532 g_HvmR0.pfnDisableCpu = VMXR0DisableCpu; 533 g_HvmR0.pfnInitVM = VMXR0InitVM; 534 g_HvmR0.pfnTermVM = VMXR0TermVM; 535 g_HvmR0.pfnSetupVM = VMXR0SetupVM; 529 536 530 537 /* … … 582 589 * Install the AMD-V methods. 
583 590 */ 584 g_HvmR0.pfnEnterSession = SVMR0Enter; 585 g_HvmR0.pfnLeaveSession = SVMR0Leave; 586 g_HvmR0.pfnSaveHostState = SVMR0SaveHostState; 587 g_HvmR0.pfnLoadGuestState = SVMR0LoadGuestState; 588 g_HvmR0.pfnRunGuestCode = SVMR0RunGuestCode; 589 g_HvmR0.pfnEnableCpu = SVMR0EnableCpu; 590 g_HvmR0.pfnDisableCpu = SVMR0DisableCpu; 591 g_HvmR0.pfnInitVM = SVMR0InitVM; 592 g_HvmR0.pfnTermVM = SVMR0TermVM; 593 g_HvmR0.pfnSetupVM = SVMR0SetupVM; 591 g_HvmR0.pfnEnterSession = SVMR0Enter; 592 g_HvmR0.pfnLeaveSession = SVMR0Leave; 593 g_HvmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback; 594 g_HvmR0.pfnSaveHostState = SVMR0SaveHostState; 595 g_HvmR0.pfnLoadGuestState = SVMR0LoadGuestState; 596 g_HvmR0.pfnRunGuestCode = SVMR0RunGuestCode; 597 g_HvmR0.pfnEnableCpu = SVMR0EnableCpu; 598 g_HvmR0.pfnDisableCpu = SVMR0DisableCpu; 599 g_HvmR0.pfnInitVM = SVMR0InitVM; 600 g_HvmR0.pfnTermVM = SVMR0TermVM; 601 g_HvmR0.pfnSetupVM = SVMR0SetupVM; 594 602 595 603 /* Query AMD features. */ … … 646 654 647 655 /* Fill in all callbacks with placeholders. */ 648 g_HvmR0.pfnEnterSession = hmR0DummyEnter; 649 g_HvmR0.pfnLeaveSession = hmR0DummyLeave; 650 g_HvmR0.pfnSaveHostState = hmR0DummySaveHostState; 651 g_HvmR0.pfnLoadGuestState = hmR0DummyLoadGuestState; 652 g_HvmR0.pfnRunGuestCode = hmR0DummyRunGuestCode; 653 g_HvmR0.pfnEnableCpu = hmR0DummyEnableCpu; 654 g_HvmR0.pfnDisableCpu = hmR0DummyDisableCpu; 655 g_HvmR0.pfnInitVM = hmR0DummyInitVM; 656 g_HvmR0.pfnTermVM = hmR0DummyTermVM; 657 g_HvmR0.pfnSetupVM = hmR0DummySetupVM; 656 g_HvmR0.pfnEnterSession = hmR0DummyEnter; 657 g_HvmR0.pfnLeaveSession = hmR0DummyLeave; 658 g_HvmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback; 659 g_HvmR0.pfnSaveHostState = hmR0DummySaveHostState; 660 g_HvmR0.pfnLoadGuestState = hmR0DummyLoadGuestState; 661 g_HvmR0.pfnRunGuestCode = hmR0DummyRunGuestCode; 662 g_HvmR0.pfnEnableCpu = hmR0DummyEnableCpu; 663 g_HvmR0.pfnDisableCpu = hmR0DummyDisableCpu; 664 g_HvmR0.pfnInitVM = hmR0DummyInitVM; 665 g_HvmR0.pfnTermVM = hmR0DummyTermVM; 666 g_HvmR0.pfnSetupVM = hmR0DummySetupVM; 658 667 659 668 /* Default is global VT-x/AMD-V init. */ … … 912 921 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo)); 913 922 Assert(!pCpu->fConfigured); 914 Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);915 923 916 924 pCpu->idCpu = idCpu; … … 1067 1075 Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day) 1068 1076 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo)); 1069 Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);1070 1077 Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ); 1071 1078 … … 1296 1303 /* 1297 1304 * Call the hardware specific initialization method. 1298 *1299 * Note! The fInUse handling here isn't correct as we can we can be1300 * rescheduled to a different cpu, but the fInUse case is mostly for1301 * debugging... Disabling preemption isn't an option when allocating1302 * memory, so we'll let it slip for now.1303 1305 */ 1304 1306 RTCCUINTREG fFlags = ASMIntDisableFlags(); 1305 1307 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu(); 1306 ASMAtomicWriteBool(&pCpu->fInUse, true);1307 1308 ASMSetFlags(fFlags); 1308 1309 1309 1310 int rc = g_HvmR0.pfnInitVM(pVM); 1310 1311 ASMAtomicWriteBool(&pCpu->fInUse, false);1312 1311 return rc; 1313 1312 } … … 1334 1333 /* 1335 1334 * Call the hardware specific method. 1336 *1337 * Note! 
Not correct as we can be rescheduled to a different cpu, but the1338 * fInUse case is mostly for debugging.1339 1335 */ 1340 1336 RTCCUINTREG fFlags = ASMIntDisableFlags(); 1341 1337 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu(); 1342 ASMAtomicWriteBool(&pCpu->fInUse, true);1343 1338 ASMSetFlags(fFlags); 1344 1339 1345 1340 int rc = g_HvmR0.pfnTermVM(pVM); 1346 1347 ASMAtomicWriteBool(&pCpu->fInUse, false);1348 1341 return rc; 1349 1342 } … … 1374 1367 RTCPUID idCpu = RTMpCpuId(); 1375 1368 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1376 ASMAtomicWriteBool(&pCpu->fInUse, true);1377 1369 1378 1370 /* On first entry we'll sync everything. */ … … 1398 1390 } 1399 1391 1400 ASMAtomicWriteBool(&pCpu->fInUse, false);1401 1392 ASMSetFlags(fFlags); 1402 1403 1393 return rc; 1394 } 1395 1396 1397 /** 1398 * Initializes the bare minimum state required for entering HM context. 1399 * 1400 * @param pvCpu Pointer to the VMCPU. 1401 */ 1402 VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu) 1403 { 1404 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1405 1406 RTCPUID idCpu = RTMpCpuId(); 1407 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1408 AssertPtr(pCpu); 1409 1410 pVCpu->hm.s.idEnteredCpu = idCpu; 1411 1412 /* Reload the host context and the guest's CR0 register for the FPU bits. */ 1413 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT; 1414 1415 /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */ 1416 if ( !pCpu->fConfigured 1417 || !g_HvmR0.fGlobalInit) 1418 { 1419 hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu); 1420 } 1404 1421 } 1405 1422 … … 1416 1433 VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu) 1417 1434 { 1435 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1436 1437 /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */ 1438 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING); 1439 1440 /* Load the bare minimum state required for entering HM. */ 1441 HMR0EnterEx(pVCpu); 1442 1443 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1444 AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_5); 1445 bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu); 1446 #endif 1447 1418 1448 RTCPUID idCpu = RTMpCpuId(); 1419 1449 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1420 1421 /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */ 1422 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING); 1423 ASMAtomicWriteBool(&pCpu->fInUse, true); 1424 1425 AssertMsg(pVCpu->hm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hm.s.idEnteredCpu)); 1426 pVCpu->hm.s.idEnteredCpu = idCpu; 1427 1428 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1429 1430 /* Always load the guest's FPU/XMM state on-demand. */ 1431 CPUMDeactivateGuestFPUState(pVCpu); 1432 1433 /* Always load the guest's debug state on-demand. */ 1434 CPUMDeactivateGuestDebugState(pVCpu); 1435 1436 /* Always reload the host context and the guest's CR0 register for the FPU 1437 bits (#NM, #MF, CR0.NE, CR0.TS, CR0.MP). */ 1438 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT; 1439 1440 /* Enable VT-x or AMD-V if local init is required, or enable if it's a 1441 freshly onlined CPU. 
*/ 1442 int rc; 1443 if ( !pCpu->fConfigured 1444 || !g_HvmR0.fGlobalInit) 1445 { 1446 rc = hmR0EnableCpu(pVM, idCpu); 1447 AssertRCReturn(rc, rc); 1448 } 1449 1450 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1451 bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu); 1452 #endif 1453 1454 rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu); 1450 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1451 Assert(pCpu); 1452 Assert(pCtx); 1453 1454 int rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu); 1455 1455 AssertRC(rc); 1456 1456 1457 /* We must save the host context here (VT-x) as we might be rescheduled on 1457 1458 a different cpu after a long jump back to ring 3. */ 1459 /** @todo This will change with preemption hooks. */ 1458 1460 rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu); 1459 1461 AssertRC(rc); 1462 1460 1463 rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx); 1461 1464 AssertRC(rc); … … 1475 1478 1476 1479 /** 1480 * Deinitializes the bare minimum state used for HM context. 1481 * 1482 * @returns VBox status code. 1483 * @param pVCpu Pointer to the VMCPU. 1484 * @param idCpu The identifier for the CPU the function is called on. 1485 */ 1486 VMMR0_INT_DECL(int) HMR0LeaveEx(PVMCPU pVCpu) 1487 { 1488 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1489 1490 if (!g_HvmR0.fGlobalInit) 1491 { 1492 RTCPUID idCpu = RTMpCpuId(); 1493 int rc = hmR0DisableCpu(idCpu); 1494 AssertRCReturn(rc, rc); 1495 } 1496 1497 /* Reset these to force a TLB flush for the next entry. */ 1498 pVCpu->hm.s.idLastCpu = NIL_RTCPUID; 1499 pVCpu->hm.s.uCurrentAsid = 0; 1500 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 1501 1502 return VINF_SUCCESS; 1503 } 1504 1505 1506 /** 1477 1507 * Leaves the VT-x or AMD-V session. 1478 1508 * … … 1486 1516 VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu) 1487 1517 { 1488 int rc;1489 RTCPUID idCpu = RTMpCpuId();1490 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];1491 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);1492 1493 1518 /** @todo r=bird: This can't be entirely right? */ 1494 1519 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING); 1495 1520 1521 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1522 AssertPtr(pCtx); 1523 1524 int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx); 1525 1496 1526 /* 1497 * Save the guest FPU and XMM state if necessary. 1498 * 1499 * Note! It's rather tricky with longjmps done by e.g. Log statements or 1500 * the page fault handler. We must restore the host FPU here to make 1501 * absolutely sure we don't leave the guest FPU state active or trash 1502 * somebody else's FPU state. 1527 * When thread-context hooks are not used, leave HM context and if necessary disable HM on the CPU. 1528 * When thread-context hooks -are- used, this work would be done in the VT-x and AMD-V thread-context callback. 1503 1529 */ 1504 if (CPUMIsGuestFPUStateActive(pVCpu)) 1505 { 1506 Log2(("CPUMR0SaveGuestFPU\n")); 1507 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx); 1508 1509 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 1510 } 1511 1512 rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx); 1513 1514 /* We don't pass on invlpg information to the recompiler for nested paging 1515 guests, so we must make sure the recompiler flushes its TLB the next 1516 time it executes code. */ 1517 if ( pVM->hm.s.fNestedPaging 1518 && CPUMIsGuestPagingEnabledEx(pCtx)) 1519 { 1520 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH); 1521 } 1522 1523 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness 1524 and ring-3 calls. 
*/ 1525 AssertMsgStmt( pVCpu->hm.s.idEnteredCpu == idCpu 1526 || RT_FAILURE_NP(rc), 1527 ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu), 1528 rc = VERR_HM_WRONG_CPU_1); 1530 if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu)) 1531 { 1532 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1533 RTCPUID idCpu = RTMpCpuId(); 1534 1535 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness 1536 and ring-3 calls when thread-context hooks are not supported. */ 1537 AssertMsgStmt( pVCpu->hm.s.idEnteredCpu == idCpu 1538 || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu), 1539 rc = VERR_HM_WRONG_CPU_1); 1540 1541 rc = HMR0LeaveEx(pVCpu); 1542 AssertRCReturn(rc, rc); 1543 } 1544 1545 /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */ 1546 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 1547 Assert(!CPUMIsGuestDebugStateActive(pVCpu)); 1548 1529 1549 pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID; 1530 1531 /*1532 * Disable VT-x or AMD-V if local init was done before.1533 */1534 if (!g_HvmR0.fGlobalInit)1535 {1536 rc = hmR0DisableCpu(idCpu);1537 AssertRC(rc);1538 1539 /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */1540 pVCpu->hm.s.idLastCpu = NIL_RTCPUID;1541 pVCpu->hm.s.uCurrentAsid = 0;1542 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);1543 }1544 1545 ASMAtomicWriteBool(&pCpu->fInUse, false);1546 1550 return rc; 1551 } 1552 1553 1554 /** 1555 * Thread-context hook for HM. 1556 * 1557 * @param enmEvent The thread-context event. 1558 * @param pvUser Opaque pointer to the VMCPU. 1559 */ 1560 VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser) 1561 { 1562 PVMCPU pVCpu = (PVMCPU)pvUser; 1563 Assert(pVCpu); 1564 Assert(g_HvmR0.pfnThreadCtxCallback); 1565 1566 g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit); 1547 1567 } 1548 1568 … … 1565 1585 Assert(pCpu->fConfigured); 1566 1586 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING); 1567 Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);1568 1587 #endif 1569 1588 1570 1589 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 1590 AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_4); 1571 1591 PGMRZDynMapStartAutoSet(pVCpu); 1572 1592 #endif -
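The HMR0.cpp hunks above add a pfnThreadCtxCallback member to the ring-0 method table, install VMXR0ThreadCtxCallback, SVMR0ThreadCtxCallback or a dummy into it, and forward thread-context events through the new HMR0ThreadCtxCallback(). The standalone sketch below illustrates only that dispatch pattern; the trimmed-down types, names and the main() driver are hypothetical and not part of VirtualBox.

    #include <stdio.h>

    /* Hypothetical, trimmed-down stand-ins for RTTHREADCTXEVENT and VMCPU. */
    typedef enum { CTXEVENT_PREEMPTING, CTXEVENT_RESUMED } CTXEVENT;
    typedef struct VCPU { int idCpu; } VCPU;

    typedef void (*PFNTHREADCTXCALLBACK)(CTXEVENT enmEvent, VCPU *pVCpu, int fGlobalInit);

    /* Backend-specific handler, analogous to VMXR0ThreadCtxCallback/SVMR0ThreadCtxCallback. */
    static void backendThreadCtxCallback(CTXEVENT enmEvent, VCPU *pVCpu, int fGlobalInit)
    {
        printf("vcpu %d: %s (global init: %d)\n", pVCpu->idCpu,
               enmEvent == CTXEVENT_PREEMPTING ? "save state, clear VMCS" : "reload state",
               fGlobalInit);
    }

    /* Placeholder installed when no backend is usable, like hmR0DummyThreadCtxCallback. */
    static void dummyThreadCtxCallback(CTXEVENT enmEvent, VCPU *pVCpu, int fGlobalInit)
    {
        (void)enmEvent; (void)pVCpu; (void)fGlobalInit;
    }

    /* Method table plus the generic forwarder, analogous to g_HvmR0 and HMR0ThreadCtxCallback. */
    static struct { PFNTHREADCTXCALLBACK pfnThreadCtxCallback; int fGlobalInit; }
        g_Methods = { dummyThreadCtxCallback, 1 };

    static void genericThreadCtxCallback(CTXEVENT enmEvent, void *pvUser)
    {
        VCPU *pVCpu = (VCPU *)pvUser;
        g_Methods.pfnThreadCtxCallback(enmEvent, pVCpu, g_Methods.fGlobalInit);
    }

    int main(void)
    {
        VCPU vcpu = { 0 };
        g_Methods.pfnThreadCtxCallback = backendThreadCtxCallback; /* "install the VT-x/AMD-V method" */
        genericThreadCtxCallback(CTXEVENT_PREEMPTING, &vcpu);
        genericThreadCtxCallback(CTXEVENT_RESUMED, &vcpu);
        return 0;
    }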
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(r47652 → r47760)

 ; * @param   HCPhysVmcs   Physical address of VM control structure
 ; */
-;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVmcs);
-ALIGNCODE(16)
-BEGINPROC VMXClearVMCS
+;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
+ALIGNCODE(16)
+BEGINPROC VMXClearVmcs
 %ifdef RT_ARCH_AMD64
     xor     rax, rax
…
     BITS 32
 %endif
-ENDPROC VMXClearVMCS
+ENDPROC VMXClearVmcs
…
 ; * @param   HCPhysVmcs   Physical address of VMCS structure
 ; */
-;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVmcs);
-ALIGNCODE(16)
-BEGINPROC VMXActivateVMCS
+;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
+ALIGNCODE(16)
+BEGINPROC VMXActivateVmcs
 %ifdef RT_ARCH_AMD64
     xor     rax, rax
…
     BITS 32
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
-ENDPROC VMXActivateVMCS
+ENDPROC VMXActivateVmcs
…
 ; * @param   [esp + 04h] gcc:rdi msc:rcx   Param 1 - First parameter - Address that will receive the current pointer
 ; */
-;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
-BEGINPROC VMXGetActivateVMCS
+;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
+BEGINPROC VMXGetActivatedVmcs
 %ifdef RT_OS_OS2
     mov     eax, VERR_NOT_SUPPORTED
…
 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
 %endif
-ENDPROC VMXGetActivateVMCS
+ENDPROC VMXGetActivatedVmcs

 ;/**
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r47718 r47760 239 239 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite); 240 240 static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu); 241 static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 241 242 242 243 HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient); … … 1581 1582 1582 1583 /** 1584 * Thread-context callback for AMD-V. 1585 * 1586 * @param enmEvent The thread-context event. 1587 * @param pVCpu Pointer to the VMCPU. 1588 * @param fGlobalInit Whether global VT-x/AMD-V init. is used. 1589 */ 1590 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit) 1591 { 1592 switch (enmEvent) 1593 { 1594 case RTTHREADCTXEVENT_PREEMPTING: 1595 { 1596 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1597 1598 PVM pVM = pVCpu->CTX_SUFF(pVM); 1599 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1600 VMMRZCallRing3Disable(pVCpu); /* No longjmps (log-flush, locks) in this fragile context. */ 1601 1602 hmR0SvmLeave(pVM, pVCpu, pCtx); 1603 1604 int rc = HMR0LeaveEx(pVCpu); /* Leave HM context, takes care of local init (term). */ 1605 AssertRC(rc); NOREF(rc); 1606 1607 VMMRZCallRing3Enable(pVCpu); /* Restore longjmp state. */ 1608 break; 1609 } 1610 1611 case RTTHREADCTXEVENT_RESUMED: 1612 { 1613 /* Disable preemption, we don't want to be migrated to another CPU while re-initializing AMD-V state. */ 1614 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 1615 RTThreadPreemptDisable(&PreemptState); 1616 1617 /* Initialize the bare minimum state required for HM. This takes care of 1618 initializing AMD-V if necessary (onlined CPUs, local init etc.) */ 1619 HMR0EnterEx(pVCpu); 1620 1621 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT; 1622 1623 RTThreadPreemptRestore(&PreemptState); 1624 break; 1625 } 1626 1627 default: 1628 break; 1629 } 1630 } 1631 1632 1633 /** 1583 1634 * Saves the host state. 1584 1635 * … … 1822 1873 1823 1874 /** 1875 * Does the necessary state syncing before returning to ring-3 for any reason 1876 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V. 1877 * 1878 * @param pVM Pointer to the VM. 1879 * @param pVCpu Pointer to the VMCPU. 1880 * @param pMixedCtx Pointer to the guest-CPU context. 1881 * 1882 * @remarks No-long-jmp zone!!! 1883 */ 1884 static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 1885 { 1886 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 1887 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 1888 1889 /* Restore host FPU state if necessary and resync on next R0 reentry .*/ 1890 if (CPUMIsGuestFPUStateActive(pVCpu)) 1891 { 1892 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx); 1893 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 1894 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 1895 } 1896 1897 /* 1898 * Restore host debug registers if necessary and resync on next R0 reentry. 
1899 */ 1900 #ifdef VBOX_STRICT 1901 if (CPUMIsHyperDebugStateActive(pVCpu)) 1902 { 1903 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 1904 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff); 1905 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff); 1906 } 1907 #endif 1908 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */)) 1909 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 1910 1911 Assert(!CPUMIsHyperDebugStateActive(pVCpu)); 1912 Assert(!CPUMIsGuestDebugStateActive(pVCpu)); 1913 1914 1915 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry); 1916 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState); 1917 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 1918 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); 1919 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3); 1920 1921 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC); 1922 } 1923 1924 1925 /** 1824 1926 * Does the necessary state syncing before doing a longjmp to ring-3. 1927 * 1928 * @param pVM Pointer to the VM. 1929 * @param pVCpu Pointer to the VMCPU. 1930 * @param pCtx Pointer to the guest-CPU context. 1931 * 1932 * @remarks No-long-jmp zone!!! 1933 */ 1934 static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 1935 { 1936 hmR0SvmLeave(pVM, pVCpu, pCtx); 1937 } 1938 1939 1940 /** 1941 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores 1942 * any remaining host state) before we longjump to ring-3 and possibly get 1943 * preempted. 1944 * 1945 * @param pVCpu Pointer to the VMCPU. 1946 * @param enmOperation The operation causing the ring-3 longjump. 1947 * @param pvUser The user argument (pointer to the possibly 1948 * out-of-date guest-CPU context). 1949 * 1950 * @remarks Must never be called with @a enmOperation == 1951 * VMMCALLRING3_VM_R0_ASSERTION. 1952 */ 1953 DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser) 1954 { 1955 /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion, */ 1956 Assert(pVCpu); 1957 Assert(pvUser); 1958 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 1959 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1960 1961 VMMRZCallRing3Disable(pVCpu); 1962 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 1963 1964 Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n")); 1965 hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser); 1966 1967 VMMRZCallRing3Enable(pVCpu); 1968 } 1969 1970 1971 /** 1972 * Take necessary actions before going back to ring-3. 1973 * 1974 * An action requires us to go back to ring-3. This function does the necessary 1975 * steps before we can safely return to ring-3. This is not the same as longjmps 1976 * to ring-3, this is voluntary. 1825 1977 * 1826 1978 * @param pVM Pointer to the VM. … … 1829 1981 * @param rcExit The reason for exiting to ring-3. Can be 1830 1982 * VINF_VMM_UNKNOWN_RING3_CALL. 
1831 *1832 * @remarks No-long-jmp zone!!!1833 */1834 static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)1835 {1836 Assert(!VMMRZCallRing3IsEnabled(pVCpu));1837 Assert(VMMR0IsLogFlushDisabled(pVCpu));1838 1839 /* Restore host FPU state if necessary and resync on next R0 reentry .*/1840 if (CPUMIsGuestFPUStateActive(pVCpu))1841 {1842 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);1843 Assert(!CPUMIsGuestFPUStateActive(pVCpu));1844 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;1845 }1846 1847 /*1848 * Restore host debug registers if necessary and resync on next R0 reentry.1849 */1850 #ifdef VBOX_STRICT1851 if (CPUMIsHyperDebugStateActive(pVCpu))1852 {1853 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;1854 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);1855 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);1856 }1857 #endif1858 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))1859 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;1860 Assert(!CPUMIsHyperDebugStateActive(pVCpu));1861 Assert(!CPUMIsGuestDebugStateActive(pVCpu));1862 1863 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);1864 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);1865 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);1866 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);1867 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);1868 1869 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);1870 }1871 1872 1873 /**1874 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores1875 * any remaining host state) before we longjump to ring-3 and possibly get1876 * preempted.1877 *1878 * @param pVCpu Pointer to the VMCPU.1879 * @param enmOperation The operation causing the ring-3 longjump.1880 * @param pvUser The user argument (pointer to the possibly1881 * out-of-date guest-CPU context).1882 *1883 * @remarks Must never be called with @a enmOperation ==1884 * VMMCALLRING3_VM_R0_ASSERTION.1885 */1886 DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)1887 {1888 /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion, */1889 Assert(pVCpu);1890 Assert(pvUser);1891 Assert(VMMRZCallRing3IsEnabled(pVCpu));1892 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));1893 1894 VMMRZCallRing3Disable(pVCpu);1895 Assert(VMMR0IsLogFlushDisabled(pVCpu));1896 Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));1897 hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);1898 VMMRZCallRing3Enable(pVCpu);1899 }1900 1901 1902 /**1903 * Take necessary actions before going back to ring-3.1904 *1905 * An action requires us to go back to ring-3. This function does the necessary1906 * steps before we can safely return to ring-3. This is not the same as longjmps1907 * to ring-3, this is voluntary.1908 *1909 * @param pVM Pointer to the VM.1910 * @param pVCpu Pointer to the VMCPU.1911 * @param pCtx Pointer to the guest-CPU context.1912 * @param rcExit The reason for exiting to ring-3. Can be1913 * VINF_VMM_UNKNOWN_RING3_CALL.1914 1983 */ 1915 1984 static void hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit) … … 1937 2006 } 1938 2007 1939 /* Sync. the guest state. */1940 hmR0SvmL ongJmpToRing3(pVM, pVCpu, pCtx, rcExit);2008 /* Sync. the necessary state for going back to ring-3. 
*/ 2009 hmR0SvmLeave(pVM, pVCpu, pCtx); 1941 2010 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3); 1942 2011 … … 1948 2017 | CPUM_CHANGED_TR 1949 2018 | CPUM_CHANGED_HIDDEN_SEL_REGS); 2019 if ( pVM->hm.s.fNestedPaging 2020 && CPUMIsGuestPagingEnabledEx(pCtx)) 2021 { 2022 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH); 2023 } 2024 2025 /* Make sure we've undo the trap flag if we tried to single step something. */ 2026 if (pVCpu->hm.s.fClearTrapFlag) 2027 { 2028 pCtx->eflags.Bits.u1TF = 0; 2029 pVCpu->hm.s.fClearTrapFlag = false; 2030 } 1950 2031 1951 2032 /* On our way back from ring-3 the following needs to be done. */ … … 1955 2036 else 1956 2037 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST; 1957 1958 /* Make sure we've undo the trap flag if we tried to single step something. */1959 if (pVCpu->hm.s.fClearTrapFlag)1960 {1961 pVCpu->hm.s.fClearTrapFlag = false;1962 pCtx->eflags.Bits.u1TF = 0;1963 }1964 2038 1965 2039 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3); -
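In the HMSVMR0.cpp changes above, the state syncing that used to live in hmR0SvmLongJmpToRing3() is consolidated into hmR0SvmLeave(), which is now shared by the longjmp path, the voluntary exit-to-ring-3 path and the new SVMR0ThreadCtxCallback(). The sketch below illustrates only that "one leave helper, many callers" idea with made-up flag names and state; it is not the actual SVM code.

    #include <stdio.h>

    /* Hypothetical "context changed" flags, loosely modelled on HM_CHANGED_GUEST_CR0 etc. */
    enum { CHANGED_GUEST_CR0 = 0x1, CHANGED_GUEST_DEBUG = 0x2, CHANGED_HOST_CONTEXT = 0x4 };

    typedef struct VCPUSTATE
    {
        int      fGuestFpuActive;    /* guest FPU currently loaded on the host CPU */
        int      fGuestDebugActive;  /* guest DRx currently loaded on the host CPU */
        unsigned fContextUseFlags;   /* what must be reloaded on the next guest entry */
    } VCPUSTATE;

    /* One shared "leave" helper: save guest state and mark it for reload next time.
       The longjmp-to-ring-3 path would call this very same helper. */
    static void leaveGuestContext(VCPUSTATE *pState)
    {
        if (pState->fGuestFpuActive)
        {
            pState->fGuestFpuActive   = 0;               /* save guest FPU, restore host FPU */
            pState->fContextUseFlags |= CHANGED_GUEST_CR0;
        }
        if (pState->fGuestDebugActive)
        {
            pState->fGuestDebugActive = 0;               /* save guest DRx, restore host DRx */
            pState->fContextUseFlags |= CHANGED_GUEST_DEBUG;
        }
    }

    static void onExitToRing3(VCPUSTATE *p)         { leaveGuestContext(p); }
    static void onThreadCtxPreempting(VCPUSTATE *p) { leaveGuestContext(p); }
    static void onThreadCtxResumed(VCPUSTATE *p)    { p->fContextUseFlags |= CHANGED_HOST_CONTEXT; }

    int main(void)
    {
        VCPUSTATE s = { 1, 1, 0 };
        onThreadCtxPreempting(&s);   /* scheduler is about to switch the EMT out */
        onThreadCtxResumed(&s);      /* EMT is scheduled back in */
        onExitToRing3(&s);           /* later, a voluntary exit to ring-3 */
        printf("fContextUseFlags=%#x\n", s.fContextUseFlags);
        return 0;
    }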
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
(r47473 → r47760)

 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…
 VMMR0DECL(int)  SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
 VMMR0DECL(int)  SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
 VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem);
 VMMR0DECL(int)  SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
…
 RT_C_DECLS_END

-#endif /* HMSVMR0_h */
+#endif /* ___HMSVMR0_h */
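The last hunk also brings the comment on the closing #endif in line with the include-guard macro. For reference, and assuming the guard at the top of the file is ___HMSVMR0_h (which the updated comment implies), the conventional pairing looks like this:

    #ifndef ___HMSVMR0_h
    #define ___HMSVMR0_h

    /* ... declarations ... */

    #endif /* ___HMSVMR0_h */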
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r47747 r47760 2109 2109 2110 2110 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */ 2111 rc = VMXClearV MCS(pVCpu->hm.s.vmx.HCPhysVmcs);2112 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearV MCSfailed! rc=%Rrc (pVM=%p)\n", rc, pVM),2111 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 2112 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM), 2113 2113 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc); 2114 2114 2115 2115 /* Load this VMCS as the current VMCS. */ 2116 rc = VMXActivateV MCS(pVCpu->hm.s.vmx.HCPhysVmcs);2117 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateV MCSfailed! rc=%Rrc (pVM=%p)\n", rc, pVM),2116 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 2117 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM), 2118 2118 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc); 2119 2119 … … 2145 2145 2146 2146 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */ 2147 rc = VMXClearV MCS(pVCpu->hm.s.vmx.HCPhysVmcs);2148 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearV MCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),2147 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 2148 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM), 2149 2149 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc); 2150 2150 … … 4348 4348 4349 4349 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */ 4350 VMXClearV MCS(pVCpu->hm.s.vmx.HCPhysVmcs);4350 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 4351 4351 4352 4352 /* Leave VMX Root Mode. */ … … 4379 4379 } 4380 4380 4381 rc2 = VMXActivateV MCS(pVCpu->hm.s.vmx.HCPhysVmcs);4381 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 4382 4382 AssertRC(rc2); 4383 4383 Assert(!(ASMGetFlags() & X86_EFL_IF)); … … 5971 5971 enmTrapType = TRPM_HARDWARE_INT; 5972 5972 break; 5973 5973 5974 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT: 5974 5975 enmTrapType = TRPM_SOFTWARE_INT; 5975 5976 break; 5977 5976 5978 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT: 5977 5979 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */ … … 5979 5981 enmTrapType = TRPM_TRAP; 5980 5982 break; 5983 5981 5984 default: 5982 5985 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType)); … … 6027 6030 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 6028 6031 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 6032 6033 /* Avoid repeating this work when thread-context hooks are used and we had been preempted before 6034 which would've done this work from the VMXR0ThreadCtxCallback(). */ 6035 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 6036 bool fPreemptDisabled = false; 6037 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD)) 6038 { 6039 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu)); 6040 RTThreadPreemptDisable(&PreemptState); 6041 fPreemptDisabled = true; 6042 if (pVCpu->hm.s.vmx.fVmxLeaveDone) 6043 { 6044 RTThreadPreemptRestore(&PreemptState); 6045 return; 6046 } 6047 } 6029 6048 6030 6049 /* Save the guest state if necessary. */ … … 6054 6073 Assert(!CPUMIsHyperDebugStateActive(pVCpu)); 6055 6074 6075 /* Restore host-state bits that VT-x only restores partially. 
*/ 6076 if (pVCpu->hm.s.vmx.fRestoreHostFlags) 6077 { 6078 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost); 6079 pVCpu->hm.s.vmx.fRestoreHostFlags = 0; 6080 } 6081 6056 6082 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry); 6057 6083 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState); … … 6064 6090 6065 6091 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC); 6092 6093 /* Restore preemption if we previous disabled it ourselves. */ 6094 if (fPreemptDisabled) 6095 { 6096 pVCpu->hm.s.vmx.fVmxLeaveDone = true; 6097 RTThreadPreemptRestore(&PreemptState); 6098 } 6066 6099 } 6067 6100 … … 6114 6147 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR)) 6115 6148 { 6116 VMXGetActivate VMCS(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);6149 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys); 6117 6150 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs; 6118 6151 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu; … … 6151 6184 } 6152 6185 6186 /* 6187 * Clear the X86_EFL_TF if necessary . 6188 */ 6189 if (pVCpu->hm.s.fClearTrapFlag) 6190 { 6191 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS); 6192 pMixedCtx->eflags.Bits.u1TF = 0; 6193 pVCpu->hm.s.fClearTrapFlag = false; 6194 } 6195 /** @todo there seems to be issues with the resume flag when the monitor trap 6196 * flag is pending without being used. Seen early in bios init when 6197 * accessing APIC page in prot mode. */ 6198 6153 6199 /* On our way back from ring-3 the following needs to be done. */ 6154 6200 /** @todo This can change with preemption hooks. */ … … 6185 6231 VMMRZCallRing3Disable(pVCpu); 6186 6232 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 6233 6187 6234 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu)); 6188 6235 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser); 6236 6189 6237 VMMRZCallRing3Enable(pVCpu); 6190 6238 } … … 6742 6790 6743 6791 /* Load the active VMCS as the current one. */ 6744 int rc = VMXActivateV MCS(pVCpu->hm.s.vmx.HCPhysVmcs);6792 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 6745 6793 if (RT_FAILURE(rc)) 6746 6794 return rc; … … 6749 6797 * as we're no preempted. */ 6750 6798 pVCpu->hm.s.fResumeVM = false; 6799 pVCpu->hm.s.vmx.fVmxLeaveDone = false; 6751 6800 return VINF_SUCCESS; 6801 } 6802 6803 6804 /** 6805 * The thread-context callback (only on platforms which support it). 6806 * 6807 * @param enmEvent The thread-context event. 6808 * @param pVCpu Pointer to the VMCPU. 6809 * @param fGlobalInit Whether global VT-x/AMD-V init. was used. 6810 * @thread EMT. 6811 */ 6812 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit) 6813 { 6814 switch (enmEvent) 6815 { 6816 case RTTHREADCTXEVENT_PREEMPTING: 6817 { 6818 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 6819 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu)); /* Paranoia. */ 6820 6821 PVM pVM = pVCpu->CTX_SUFF(pVM); 6822 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu); 6823 VMMRZCallRing3Disable(pVCpu); /* No longjmps (log-flush, locks) in this fragile context. */ 6824 hmR0VmxLeave(pVM, pVCpu, pMixedCtx); /* Save the guest-state, restore host-state (FPU, debug etc.). */ 6825 6826 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); /* Flush VMCS CPU state to VMCS region in memory. 
*/ 6827 AssertRC(rc); NOREF(rc); 6828 6829 rc = HMR0LeaveEx(pVCpu); /* Leave HM context, takes care of local init (term). */ 6830 AssertRC(rc); NOREF(rc); 6831 6832 VMMRZCallRing3Enable(pVCpu); /* Restore longjmp state. */ 6833 break; 6834 } 6835 6836 case RTTHREADCTXEVENT_RESUMED: 6837 { 6838 /* Disable preemption, we don't want to be migrated to another CPU while re-initializing VT-x state. */ 6839 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 6840 RTThreadPreemptDisable(&PreemptState); 6841 6842 /* Initialize the bare minimum state required for HM. This takes care of 6843 initializing VT-x if necessary (onlined CPUs, local init etc.) */ 6844 HMR0EnterEx(pVCpu); 6845 6846 /* Load the active VMCS as the current one. */ 6847 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 6848 AssertRC(rc); 6849 6850 pVCpu->hm.s.fResumeVM = false; 6851 pVCpu->hm.s.vmx.fVmxLeaveDone = false; 6852 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT; 6853 6854 /* Restore preemption, migrating to another CPU should be fine now. */ 6855 RTThreadPreemptRestore(&PreemptState); 6856 break; 6857 } 6858 6859 default: 6860 break; 6861 } 6752 6862 } 6753 6863 … … 6764 6874 { 6765 6875 AssertPtr(pVCpu); 6766 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));6767 6876 NOREF(pVM); 6768 6877 NOREF(pCtx); 6769 6878 6770 /** @todo this will change with preemption hooks where we only VMCLEAR when 6771 * we are actually going to be preempted, not all the time like we 6772 * currently do. */ 6773 6774 /* Restore host-state bits that VT-x only restores partially. */ 6775 if (pVCpu->hm.s.vmx.fRestoreHostFlags) 6776 { 6777 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost); 6778 pVCpu->hm.s.vmx.fRestoreHostFlags = 0; 6779 } 6780 6781 /* 6782 * Sync the current VMCS (writes back internal data back into the VMCS region in memory) 6783 * and mark the VMCS launch-state as "clear". 6784 */ 6785 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs); 6786 return rc; 6879 if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu)) 6880 { 6881 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 6882 6883 /* 6884 * Sync the current VMCS (writes back internal data back into the VMCS region in memory) 6885 * and mark the VMCS launch-state as "clear". 6886 */ 6887 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 6888 return rc; 6889 } 6890 6891 /* With thread-context hooks, nothing to do here. It's taken care of in VMXR0ThreadCtxCallback(). */ 6892 return VINF_SUCCESS; 6787 6893 } 6788 6894 … … 7030 7136 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration. 7031 7137 */ 7032 /** @todo Rework event evaluation and injection to be completely separate. */ 7138 /** @todo Rework event evaluation and injection to be completely separate. 7139 * Update: Tried it, problem with handling halts. Control never returns to VT-x 7140 * if we exit VT-x with external interrupt pending in a TRPM event. */ 7033 7141 if (TRPMHasTrap(pVCpu)) 7034 7142 hmR0VmxTrpmTrapToPendingEvent(pVCpu); … … 7394 7502 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 7395 7503 } 7396 7397 /*7398 * Clear the X86_EFL_TF if necessary .7399 */7400 if (pVCpu->hm.s.fClearTrapFlag)7401 {7402 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);7403 AssertRCReturn(rc2, rc2);7404 pVCpu->hm.s.fClearTrapFlag = false;7405 pCtx->eflags.Bits.u1TF = 0;7406 }7407 /** @todo there seems to be issues with the resume flag when the monitor trap7408 * flag is pending without being used. 
Seen early in bios init when7409 * accessing APIC page in prot mode. */7410 7504 7411 7505 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x); … … 7646 7740 AssertRCBreak(rc); 7647 7741 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG) 7648 && (u64Val & 0xfffffe3c)) /* Bits 31 -9, bits 2-5MBZ. */7742 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */ 7649 7743 { 7650 7744 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED); … … 7757 7851 AssertRCBreak(rc); 7758 7852 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)), 7759 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63 -35, bits 31-2 MBZ. */7853 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */ 7760 7854 } 7761 7855 … … 7792 7886 AssertRCBreak(rc); 7793 7887 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)), 7794 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63 -12, bit 9, bits 7-1 MBZ. */7888 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */ 7795 7889 HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST), 7796 7890 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH); … … 8014 8108 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID); 8015 8109 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID); 8016 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11 -8 MBZ. */8110 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */ 8017 8111 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff 8018 8112 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID); … … 8117 8211 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val); 8118 8212 AssertRCBreak(rc); 8119 /* Bits 63 -15, Bit 13, Bits 11-4 MBZ. */8213 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */ 8120 8214 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED); 8121 8215 u32Val = u64Val; /* For pending debug exceptions checks below. */ … … 8125 8219 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val); 8126 8220 AssertRCBreak(rc); 8127 /* Bits 31 -15, Bit 13, Bits 11-4 MBZ. */8221 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */ 8128 8222 HMVMX_CHECK_BREAK(!(u64Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED); 8129 8223 } -
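A recurring theme in the HMVMXR0.cpp changes above is making the "leave" work idempotent: hmR0VmxLeave() now checks fVmxLeaveDone under disabled preemption, so the save/VMCLEAR work is not repeated when the thread-context callback has already run, and VMXR0Leave() only clears the VMCS itself when no thread-context hooks are registered. The standalone sketch below shows only that run-once guard pattern; the names and the fake "save state" step are illustrative, not the real VT-x code.

    #include <stdio.h>

    typedef struct VCPUSTATE
    {
        int fLeaveDone;   /* analogous to fVmxLeaveDone: leave work already performed? */
    } VCPUSTATE;

    /* Pretend preemption control; in ring-0 this would be RTThreadPreemptDisable/Restore. */
    static void preemptDisable(void) { }
    static void preemptRestore(void) { }

    /* The shared "leave" worker: runs at most once per guest-code session. */
    static void leaveOnce(VCPUSTATE *pState, const char *pszCaller)
    {
        preemptDisable();
        if (!pState->fLeaveDone)
        {
            printf("%s: saving guest state, clearing VMCS\n", pszCaller);
            pState->fLeaveDone = 1;
        }
        else
            printf("%s: nothing to do, already left\n", pszCaller);
        preemptRestore();
    }

    int main(void)
    {
        VCPUSTATE s = { 0 };
        leaveOnce(&s, "thread-context PREEMPTING hook"); /* first caller does the work */
        leaveOnce(&s, "explicit leave on ring-3 exit");  /* second caller is a no-op */
        s.fLeaveDone = 0;                                /* reset on the next entry, as VMXR0Enter does */
        return 0;
    }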
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
(r47652 → r47760)

 VMMR0DECL(int)  VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
 VMMR0DECL(int)  VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
 VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem);
 VMMR0DECL(int)  VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
…
 DECLASM(int)    VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);

+
 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 DECLASM(int)    VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
…
 # endif

-/* Cached VMCS accesses -- defined always in the old VT-x code, defined only for 32 hosts on new code. */
-# ifdef VMX_USE_CACHED_VMCS_ACCESSES
+/* Cached VMCS accesses -- defined only for 32 hosts (with 64-bit guest support). */
+# ifdef VMX_USE_CACHED_VMCS_ACCESSES
 VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);
…
     return VINF_SUCCESS;
 }
-# endif
+# endif

 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
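The reworded comment above concerns the cached VMCS accesses used on 32-bit hosts with 64-bit guest support, where writes go through VMXWriteCachedVmcsEx() and are applied later rather than issued immediately. The sketch below is only a rough, self-contained illustration of such write batching; the cache layout, field index and size are assumed and do not reproduce the real VMCSCACHE.

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_MAX_ENTRIES 8   /* assumed size, not the real limit */

    typedef struct VMCSWRITECACHE
    {
        uint32_t aField[CACHE_MAX_ENTRIES];
        uint64_t aVal[CACHE_MAX_ENTRIES];
        uint32_t cValidEntries;
    } VMCSWRITECACHE;

    /* Queue a VMCS write instead of performing it right away. */
    static int writeCachedVmcs(VMCSWRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
    {
        if (pCache->cValidEntries >= CACHE_MAX_ENTRIES)
            return -1;                                   /* cache full, caller must flush first */
        pCache->aField[pCache->cValidEntries] = idxField;
        pCache->aVal[pCache->cValidEntries]   = u64Val;
        pCache->cValidEntries++;
        return 0;
    }

    /* Flush all queued writes; printf stands in for the actual VMWRITEs. */
    static void flushCachedVmcs(VMCSWRITECACHE *pCache)
    {
        for (uint32_t i = 0; i < pCache->cValidEntries; i++)
            printf("VMWRITE %#x <- %#llx\n", pCache->aField[i], (unsigned long long)pCache->aVal[i]);
        pCache->cValidEntries = 0;
    }

    int main(void)
    {
        VMCSWRITECACHE Cache = { {0}, {0}, 0 };
        writeCachedVmcs(&Cache, 0x681e /* assumed field index */, 0x1000);
        flushCachedVmcs(&Cache);
        return 0;
    }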
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(r47645 → r47760)

 {
     RTThreadCtxHooksRelease(pVCpu->vmm.s.hR0ThreadCtx);
+}
+
+
+/**
+ * Registers the thread-context hook for this VCPU.
+ *
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   pfnThreadHook   Pointer to the thread-context callback.
+ * @returns VBox status code.
+ *
+ * @thread EMT.
+ */
+VMMR0DECL(int) VMMR0ThreadCtxHooksRegister(PVMCPU pVCpu, PFNRTTHREADCTXHOOK pfnThreadHook)
+{
+    return RTThreadCtxHooksRegister(pVCpu->vmm.s.hR0ThreadCtx, pfnThreadHook, pVCpu);
+}
+
+
+/**
+ * Deregisters the thread-context hook for this VCPU.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   Pointer to the VMCPU.
+ * @thread EMT.
+ */
+VMMR0DECL(int) VMMR0ThreadCtxHooksDeregister(PVMCPU pVCpu)
+{
+    return RTThreadCtxHooksDeregister(pVCpu->vmm.s.hR0ThreadCtx);
 }
…
     if (!HMR0SuspendPending())
     {
+        /** @todo VMMR0ThreadCtxHooks support. */
         rc = HMR0Enter(pVM, pVCpu);
         if (RT_SUCCESS(rc))
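VMMR0.cpp thus gains thin wrappers for registering and deregistering the per-VCPU thread-context hook, passing the VCPU pointer back as the user argument of the callback. The standalone sketch below mimics only that register/notify/deregister shape; the hook-slot type and the driver code are hypothetical, not the IPRT implementation.

    #include <stdio.h>

    typedef enum { CTXEVENT_PREEMPTING, CTXEVENT_RESUMED } CTXEVENT;
    typedef void (*PFNCTXHOOK)(CTXEVENT enmEvent, void *pvUser);

    /* A per-thread hook slot, loosely analogous to the hR0ThreadCtx handle. */
    typedef struct CTXHOOK
    {
        PFNCTXHOOK pfnHook;
        void      *pvUser;
    } CTXHOOK;

    static int hookRegister(CTXHOOK *pHook, PFNCTXHOOK pfn, void *pvUser)
    {
        pHook->pfnHook = pfn;
        pHook->pvUser  = pvUser;
        return 0;
    }

    static int hookDeregister(CTXHOOK *pHook)
    {
        pHook->pfnHook = NULL;
        pHook->pvUser  = NULL;
        return 0;
    }

    /* What the scheduler-notification layer would do when the thread is switched out/in. */
    static void notify(CTXHOOK *pHook, CTXEVENT enmEvent)
    {
        if (pHook->pfnHook)
            pHook->pfnHook(enmEvent, pHook->pvUser);
    }

    static void myVCpuHook(CTXEVENT enmEvent, void *pvUser)
    {
        printf("vcpu %p: event %d\n", pvUser, (int)enmEvent);
    }

    int main(void)
    {
        CTXHOOK Hook = { NULL, NULL };
        int iDummyVCpu = 0;
        hookRegister(&Hook, myVCpuHook, &iDummyVCpu);  /* like VMMR0ThreadCtxHooksRegister(pVCpu, ...) */
        notify(&Hook, CTXEVENT_PREEMPTING);
        notify(&Hook, CTXEVENT_RESUMED);
        hookDeregister(&Hook);                         /* like VMMR0ThreadCtxHooksDeregister(pVCpu) */
        return 0;
    }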
trunk/src/VBox/VMM/include/HMInternal.h
(r47718 → r47760)

     /** Set if guest was executing in real mode (extra checks). */
     bool                        fWasInRealMode;
+    /** Whether we've completed the restoration procedure while leaving the inner
+     *  VT-x context. */
+    bool                        fVmxLeaveDone;
 } vmx;