Changeset 47760 in vbox for trunk/src/VBox/VMM/VMMR0/HMR0.cpp
- Timestamp: Aug 15, 2013 12:57:02 PM
- File: 1 edited
  - trunk/src/VBox/VMM/VMMR0/HMR0.cpp

Legend for the reconstructed diff below: lines prefixed with `-` were removed in r47760, lines prefixed with `+` were added, and unprefixed lines are unchanged context.

trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r47652 → r47760)

In short, this changeset adds thread-context (preemption) hook plumbing to HM: a pfnThreadCtxCallback member in the ring-0 method table with VT-x, AMD-V and dummy implementations, new HMR0EnterEx/HMR0LeaveEx/HMR0ThreadCtxCallback entry points, and removal of the old fInUse debug bookkeeping that the hooks make obsolete.
The ring-0 method table gains a thread-context callback slot. The neighbouring members change only in alignment whitespace; they are shown here already aligned:

```diff
@@ -86 +86 @@
 /** @name Ring-0 method table for AMD-V and VT-x specific operations.
  * @{ */
 DECLR0CALLBACKMEMBER(int,  pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
 DECLR0CALLBACKMEMBER(int,  pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
+DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback,(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
 DECLR0CALLBACKMEMBER(int,  pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
 DECLR0CALLBACKMEMBER(int,  pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
 DECLR0CALLBACKMEMBER(int,  pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
 DECLR0CALLBACKMEMBER(int,  pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                          bool fEnabledByHost));
 DECLR0CALLBACKMEMBER(int,  pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
 DECLR0CALLBACKMEMBER(int,  pfnInitVM,(PVM pVM));
 DECLR0CALLBACKMEMBER(int,  pfnTermVM,(PVM pVM));
 DECLR0CALLBACKMEMBER(int,  pfnSetupVM,(PVM pVM));
 /** @} */
```

A no-op placeholder is added next to the other dummy handlers:

```diff
@@ -251 +252 @@
     NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
     return VINF_SUCCESS;
+}
+
+static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
+{
+    NOREF(enmEvent); NOREF(pVCpu); NOREF(fGlobalInit);
 }
```

The VT-x method installation picks up the new callback (again, the other assignments only gain alignment whitespace):

```diff
@@ -517 +523 @@
     /*
      * Install the VT-x methods.
      */
     g_HvmR0.pfnEnterSession      = VMXR0Enter;
     g_HvmR0.pfnLeaveSession      = VMXR0Leave;
+    g_HvmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
     g_HvmR0.pfnSaveHostState     = VMXR0SaveHostState;
     g_HvmR0.pfnLoadGuestState    = VMXR0LoadGuestState;
     g_HvmR0.pfnRunGuestCode      = VMXR0RunGuestCode;
     g_HvmR0.pfnEnableCpu         = VMXR0EnableCpu;
     g_HvmR0.pfnDisableCpu        = VMXR0DisableCpu;
     g_HvmR0.pfnInitVM            = VMXR0InitVM;
     g_HvmR0.pfnTermVM            = VMXR0TermVM;
     g_HvmR0.pfnSetupVM           = VMXR0SetupVM;
```
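The table-plus-dummies pattern above is worth seeing in isolation: every backend (VT-x, AMD-V, or a do-nothing fallback) fills the same slots, so callers never NULL-check. Below is a minimal, self-contained sketch of that shape; the `MYHMMETHODS`/`MYVCPU` names and everything else in it are hypothetical stand-ins, not VirtualBox code.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the VBox types used by the real table. */
typedef enum { MYCTXEVENT_PREEMPTING, MYCTXEVENT_RESUMED } MYCTXEVENT;
typedef struct MYVCPU { int iCpu; } MYVCPU;

/* A method table in the style of g_HvmR0: one set of entry points per backend. */
typedef struct MYHMMETHODS
{
    int  (*pfnEnterSession)(MYVCPU *pVCpu);
    void (*pfnThreadCtxCallback)(MYCTXEVENT enmEvent, MYVCPU *pVCpu, int fGlobalInit);
} MYHMMETHODS;

/* Dummy placeholders, analogous to hmR0DummyEnter/hmR0DummyThreadCtxCallback:
   installed when no hardware support is detected so every slot stays callable. */
static int  myDummyEnter(MYVCPU *pVCpu) { (void)pVCpu; return 0; }
static void myDummyThreadCtxCallback(MYCTXEVENT enmEvent, MYVCPU *pVCpu, int fGlobalInit)
{
    (void)enmEvent; (void)pVCpu; (void)fGlobalInit;
}

static MYHMMETHODS g_Methods = { myDummyEnter, myDummyThreadCtxCallback };

int main(void)
{
    MYVCPU VCpu = { 0 };
    /* Safe even without a real backend: the dummies are no-ops. */
    g_Methods.pfnEnterSession(&VCpu);
    g_Methods.pfnThreadCtxCallback(MYCTXEVENT_PREEMPTING, &VCpu, 1);
    printf("dispatched through the method table\n");
    return 0;
}
```

A real backend would simply overwrite the two slots, exactly as the VT-x and AMD-V hunks in this diff do.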
The AMD-V installation gets the matching assignment:

```diff
@@ -582 +589 @@
      * Install the AMD-V methods.
      */
     g_HvmR0.pfnEnterSession      = SVMR0Enter;
     g_HvmR0.pfnLeaveSession      = SVMR0Leave;
+    g_HvmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
     g_HvmR0.pfnSaveHostState     = SVMR0SaveHostState;
     g_HvmR0.pfnLoadGuestState    = SVMR0LoadGuestState;
     g_HvmR0.pfnRunGuestCode      = SVMR0RunGuestCode;
     g_HvmR0.pfnEnableCpu         = SVMR0EnableCpu;
     g_HvmR0.pfnDisableCpu        = SVMR0DisableCpu;
     g_HvmR0.pfnInitVM            = SVMR0InitVM;
     g_HvmR0.pfnTermVM            = SVMR0TermVM;
     g_HvmR0.pfnSetupVM           = SVMR0SetupVM;

     /* Query AMD features. */
```

As does the placeholder table used when neither VT-x nor AMD-V is usable:

```diff
@@ -646 +654 @@
     /* Fill in all callbacks with placeholders. */
     g_HvmR0.pfnEnterSession      = hmR0DummyEnter;
     g_HvmR0.pfnLeaveSession      = hmR0DummyLeave;
+    g_HvmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback;
     g_HvmR0.pfnSaveHostState     = hmR0DummySaveHostState;
     g_HvmR0.pfnLoadGuestState    = hmR0DummyLoadGuestState;
     g_HvmR0.pfnRunGuestCode      = hmR0DummyRunGuestCode;
     g_HvmR0.pfnEnableCpu         = hmR0DummyEnableCpu;
     g_HvmR0.pfnDisableCpu        = hmR0DummyDisableCpu;
     g_HvmR0.pfnInitVM            = hmR0DummyInitVM;
     g_HvmR0.pfnTermVM            = hmR0DummyTermVM;
     g_HvmR0.pfnSetupVM           = hmR0DummySetupVM;

     /* Default is global VT-x/AMD-V init. */
```

The fInUse assertions disappear from the per-CPU enable/disable paths:

```diff
@@ -912 +921 @@ hmR0EnableCpu
     Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
     Assert(!pCpu->fConfigured);
-    Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);

     pCpu->idCpu = idCpu;
@@ -1067 +1075 @@ hmR0DisableCpu
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
-    Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
     Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
```

HMR0InitVM loses both the stale fInUse comment and the writes around the backend call:

```diff
@@ -1296 +1303 @@ HMR0InitVM
     /*
      * Call the hardware specific initialization method.
-     *
-     * Note! The fInUse handling here isn't correct as we can we can be
-     *       rescheduled to a different cpu, but the fInUse case is mostly for
-     *       debugging...  Disabling preemption isn't an option when allocating
-     *       memory, so we'll let it slip for now.
      */
     RTCCUINTREG fFlags = ASMIntDisableFlags();
     PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
-    ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);

     int rc = g_HvmR0.pfnInitVM(pVM);
-
-    ASMAtomicWriteBool(&pCpu->fInUse, false);
     return rc;
 }
```
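Note what HMR0InitVM (above) and HMR0TermVM (next hunk) keep after the fInUse writes go: only the brief interrupts-off window around HMR0GetCurrentCpu(), which stops the thread migrating while the per-CPU structure is sampled. Here is a compilable model of that save/disable/restore shape; all names (`fakeIntDisableFlags` and friends) are stand-ins for the real ASM*/RTMp* APIs, not those APIs themselves.

```c
#include <assert.h>
#include <stdio.h>

/* Stand-ins modelling the shape of ASMIntDisableFlags()/ASMSetFlags()/RTMpCpuId();
   no real interrupt control happens here. */
typedef unsigned long UINTREG;
static int g_fIntsEnabled = 1;

static UINTREG fakeIntDisableFlags(void) { UINTREG f = (UINTREG)g_fIntsEnabled; g_fIntsEnabled = 0; return f; }
static void    fakeSetFlags(UINTREG f)   { g_fIntsEnabled = (int)f; }
static int     fakeCurrentCpu(void)      { assert(!g_fIntsEnabled); return 0; }

/* Disable interrupts, sample the current CPU, restore the original flags. */
static int lookupCurrentCpu(void)
{
    UINTREG fFlags = fakeIntDisableFlags();
    int     idCpu  = fakeCurrentCpu();      /* stable while interrupts are off */
    fakeSetFlags(fFlags);
    return idCpu;
}

int main(void)
{
    printf("current cpu: %d\n", lookupCurrentCpu());
    return 0;
}
```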
HMR0TermVM gets the same treatment:

```diff
@@ -1334 +1333 @@ HMR0TermVM
     /*
      * Call the hardware specific method.
-     *
-     * Note! Not correct as we can be rescheduled to a different cpu, but the
-     *       fInUse case is mostly for debugging.
      */
     RTCCUINTREG fFlags = ASMIntDisableFlags();
     PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
-    ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);

     int rc = g_HvmR0.pfnTermVM(pVM);
-
-    ASMAtomicWriteBool(&pCpu->fInUse, false);
     return rc;
 }
```

The remaining fInUse writes go away too, and HMR0EnterEx is introduced immediately after the function they sat in:

```diff
@@ -1374 +1367 @@
     RTCPUID idCpu = RTMpCpuId();
     PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
-    ASMAtomicWriteBool(&pCpu->fInUse, true);

     /* On first entry we'll sync everything. */
@@ -1398 +1390 @@
 }

-    ASMAtomicWriteBool(&pCpu->fInUse, false);
     ASMSetFlags(fFlags);
-
     return rc;
+}
+
+
+/**
+ * Initializes the bare minimum state required for entering HM context.
+ *
+ * @param   pvCpu       Pointer to the VMCPU.
+ */
+VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    RTCPUID idCpu = RTMpCpuId();
+    PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
+    AssertPtr(pCpu);
+
+    pVCpu->hm.s.idEnteredCpu = idCpu;
+
+    /* Reload the host context and the guest's CR0 register for the FPU bits. */
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
+
+    /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
+    if (   !pCpu->fConfigured
+        || !g_HvmR0.fGlobalInit)
+    {
+        hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
+    }
 }
```
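The point of the new HMR0EnterEx is that it factors out just enough per-CPU setup that it can be called on its own, for example from a resume path, without redoing the full session entry. A rough stand-alone model of that split follows; the types, flag names, and both functions are hypothetical, assuming (as the real code asserts) that the caller runs with preemption disabled.

```c
#include <stdio.h>

/* Hypothetical per-VCPU state mirroring the two fields the real helper touches. */
typedef struct VCPUSTATE { int idEnteredCpu; unsigned fContextUseFlags; } VCPUSTATE;

enum { CHANGED_GUEST_CR0 = 1u << 0, CHANGED_HOST_CONTEXT = 1u << 1 };

/* The "Ex" half: bare-minimum bookkeeping, reusable on its own. */
static void hmEnterMinimal(VCPUSTATE *pVCpu, int idCpu, int fCpuConfigured, int fGlobalInit)
{
    pVCpu->idEnteredCpu = idCpu;                 /* remember which CPU we entered on */
    pVCpu->fContextUseFlags |= CHANGED_GUEST_CR0 | CHANGED_HOST_CONTEXT; /* force reload */
    if (!fCpuConfigured || !fGlobalInit)
    {
        /* here the real code enables VT-x/AMD-V on this CPU (hmR0EnableCpu) */
    }
}

/* The full entry: minimal state first, then the backend session setup. */
static int hmEnterFull(VCPUSTATE *pVCpu, int idCpu)
{
    hmEnterMinimal(pVCpu, idCpu, 1, 1);
    /* ... then pfnEnterSession / pfnSaveHostState / pfnLoadGuestState ... */
    return 0;
}

int main(void)
{
    VCPUSTATE VCpu = { -1, 0 };
    hmEnterFull(&VCpu, 2);
    printf("entered on cpu %d, flags %#x\n", VCpu.idEnteredCpu, VCpu.fContextUseFlags);
    return 0;
}
```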
HMR0Enter is then restructured around the new helper: the suspend check and the minimal state load move to the top, the FPU/debug-state deactivation and duplicated bookkeeping are dropped, and the backend calls follow:

```diff
@@ -1416 +1433 @@ HMR0Enter
 VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
+    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+
+    /* Load the bare minimum state required for entering HM. */
+    HMR0EnterEx(pVCpu);
+
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_5);
+    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
+#endif
+
     RTCPUID idCpu = RTMpCpuId();
     PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
-
-    /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
-    ASMAtomicWriteBool(&pCpu->fInUse, true);
-
-    AssertMsg(pVCpu->hm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hm.s.idEnteredCpu));
-    pVCpu->hm.s.idEnteredCpu = idCpu;
-
-    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-
-    /* Always load the guest's FPU/XMM state on-demand. */
-    CPUMDeactivateGuestFPUState(pVCpu);
-
-    /* Always load the guest's debug state on-demand. */
-    CPUMDeactivateGuestDebugState(pVCpu);
-
-    /* Always reload the host context and the guest's CR0 register for the FPU
-       bits (#NM, #MF, CR0.NE, CR0.TS, CR0.MP). */
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
-
-    /* Enable VT-x or AMD-V if local init is required, or enable if it's a
-       freshly onlined CPU. */
-    int rc;
-    if (   !pCpu->fConfigured
-        || !g_HvmR0.fGlobalInit)
-    {
-        rc = hmR0EnableCpu(pVM, idCpu);
-        AssertRCReturn(rc, rc);
-    }
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
-#endif
-
-    rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
+    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+    Assert(pCpu);
+    Assert(pCtx);
+
+    int rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
     AssertRC(rc);
+
     /* We must save the host context here (VT-x) as we might be rescheduled on
        a different cpu after a long jump back to ring 3. */
+    /** @todo This will change with preemption hooks. */
     rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
     AssertRC(rc);
+
     rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
     AssertRC(rc);
```
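A detail worth calling out in the hunk above is the `rc |= ...` accumulation across the three backend calls before a single failure check. It works because VINF_SUCCESS is 0 and VBox failure statuses are negative, so OR-ing preserves the sign bit of any failure. A tiny demonstration with locally defined stand-in codes (`VERR_SOMETHING_FAILED` is hypothetical; the real codes live in IPRT's err.h):

```c
#include <stdio.h>

/* Locally defined stand-ins for the IPRT status-code conventions. */
#define VINF_SUCCESS           0
#define VERR_SOMETHING_FAILED  (-1)      /* hypothetical negative status */
#define RT_SUCCESS(rc)         ((rc) >= 0)

static int stepEnterSession(void)   { return VINF_SUCCESS; }
static int stepSaveHostState(void)  { return VERR_SOMETHING_FAILED; } /* pretend failure */
static int stepLoadGuestState(void) { return VINF_SUCCESS; }

int main(void)
{
    /* OR-accumulation: any negative status keeps the sign bit set, so one
       test after the sequence catches a failure anywhere.  The combined
       value is only meaningful as pass/fail, not as a precise error code. */
    int rc = stepEnterSession();
    rc |= stepSaveHostState();
    rc |= stepLoadGuestState();
    if (RT_SUCCESS(rc))
        printf("all steps ok\n");
    else
        printf("some step failed (rc=%d)\n", rc);
    return 0;
}
```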
A matching HMR0LeaveEx helper is added ahead of HMR0Leave; note that its doc comment still lists an idCpu parameter the function does not actually take:

```diff
@@ -1475 +1478 @@

 /**
+ * Deinitializes the bare minimum state used for HM context.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ */
+VMMR0_INT_DECL(int) HMR0LeaveEx(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    if (!g_HvmR0.fGlobalInit)
+    {
+        RTCPUID idCpu = RTMpCpuId();
+        int rc = hmR0DisableCpu(idCpu);
+        AssertRCReturn(rc, rc);
+    }
+
+    /* Reset these to force a TLB flush for the next entry. */
+    pVCpu->hm.s.idLastCpu = NIL_RTCPUID;
+    pVCpu->hm.s.uCurrentAsid = 0;
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
+
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Leaves the VT-x or AMD-V session.
```

HMR0Leave now calls the backend first and only performs the per-CPU teardown itself when thread-context hooks are not registered; the FPU save logic moves out of this function entirely, leaving asserts in its place, and the new HMR0ThreadCtxCallback trampoline follows:

```diff
@@ -1486 +1516 @@ HMR0Leave
 VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
 {
-    int             rc;
-    RTCPUID         idCpu = RTMpCpuId();
-    PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
-    PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
-
     /** @todo r=bird: This can't be entirely right? */
     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);

+    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+    AssertPtr(pCtx);
+
+    int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
+
     /*
-     * Save the guest FPU and XMM state if necessary.
-     *
-     * Note! It's rather tricky with longjmps done by e.g. Log statements or
-     *       the page fault handler.  We must restore the host FPU here to make
-     *       absolutely sure we don't leave the guest FPU state active or trash
-     *       somebody else's FPU state.
+     * When thread-context hooks are not used, leave HM context and if necessary disable HM on the CPU.
+     * When thread-context hooks -are- used, this work would be done in the VT-x and AMD-V thread-context callback.
      */
-    if (CPUMIsGuestFPUStateActive(pVCpu))
-    {
-        Log2(("CPUMR0SaveGuestFPU\n"));
-        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
-
-        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-    }
-
-    rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
-
-    /* We don't pass on invlpg information to the recompiler for nested paging
-       guests, so we must make sure the recompiler flushes its TLB the next
-       time it executes code. */
-    if (    pVM->hm.s.fNestedPaging
-        &&  CPUMIsGuestPagingEnabledEx(pCtx))
-    {
-        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
-    }
-
-    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
-       and ring-3 calls. */
-    AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
-                  || RT_FAILURE_NP(rc),
-                  ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
-                  rc = VERR_HM_WRONG_CPU_1);
+    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+    {
+        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+        RTCPUID idCpu = RTMpCpuId();
+
+        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
+           and ring-3 calls when thread-context hooks are not supported. */
+        AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
+                      || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
+                      rc = VERR_HM_WRONG_CPU_1);
+
+        rc = HMR0LeaveEx(pVCpu);
+        AssertRCReturn(rc, rc);
+    }
+
+    /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
+    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+
     pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
-
-    /*
-     * Disable VT-x or AMD-V if local init was done before.
-     */
-    if (!g_HvmR0.fGlobalInit)
-    {
-        rc = hmR0DisableCpu(idCpu);
-        AssertRC(rc);
-
-        /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
-        pVCpu->hm.s.idLastCpu = NIL_RTCPUID;
-        pVCpu->hm.s.uCurrentAsid = 0;
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
-    }
-
-    ASMAtomicWriteBool(&pCpu->fInUse, false);
     return rc;
 }
+
+
+/**
+ * Thread-context hook for HM.
+ *
+ * @param   enmEvent        The thread-context event.
+ * @param   pvUser          Opaque pointer to the VMCPU.
+ */
+VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
+{
+    PVMCPU pVCpu = (PVMCPU)pvUser;
+    Assert(pVCpu);
+    Assert(g_HvmR0.pfnThreadCtxCallback);
+
+    g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit);
+}
```

Finally, the last fInUse assertion is swapped for a hooks check:

```diff
@@ -1565 +1585 @@
     Assert(pCpu->fConfigured);
     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
-    Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
 #endif

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
+    AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_4);
     PGMRZDynMapStartAutoSet(pVCpu);
 #endif
```
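HMR0ThreadCtxCallback above is a classic opaque-pointer trampoline: the scheduler-facing hook has a fixed (event, void *pvUser) signature, and the trampoline recovers the typed VMCPU and forwards through the method table. A self-contained sketch of the same shape, with hypothetical names throughout (this is a model of the pattern, not the VMMR0 hook API):

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical event and VCPU types. */
typedef enum { CTXEVENT_PREEMPTING, CTXEVENT_RESUMED } CTXEVENT;
typedef struct VCPU { int id; } VCPU;

/* A backend callback, standing in for VMXR0ThreadCtxCallback/SVMR0ThreadCtxCallback. */
static void vmxThreadCtxCallback(CTXEVENT enmEvent, VCPU *pVCpu, int fGlobalInit)
{
    (void)fGlobalInit;
    printf("vcpu %d: %s\n", pVCpu->id,
           enmEvent == CTXEVENT_PREEMPTING ? "preempted, save state" : "resumed, reload state");
}

/* Global method table, standing in for g_HvmR0. */
static struct
{
    void (*pfnThreadCtxCallback)(CTXEVENT enmEvent, VCPU *pVCpu, int fGlobalInit);
    int  fGlobalInit;
} g_Hm = { vmxThreadCtxCallback, 1 };

/* The trampoline: fixed signature for registration, pvUser carries the VCPU. */
static void hmThreadCtxCallback(CTXEVENT enmEvent, void *pvUser)
{
    VCPU *pVCpu = (VCPU *)pvUser;
    assert(pVCpu && g_Hm.pfnThreadCtxCallback);
    g_Hm.pfnThreadCtxCallback(enmEvent, pVCpu, g_Hm.fGlobalInit);
}

int main(void)
{
    VCPU VCpu = { 0 };
    hmThreadCtxCallback(CTXEVENT_PREEMPTING, &VCpu);
    hmThreadCtxCallback(CTXEVENT_RESUMED, &VCpu);
    return 0;
}
```

The indirection keeps the registration API backend-agnostic: whichever of the VT-x, AMD-V, or dummy callbacks the table holds is reached through the same trampoline.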