VirtualBox

Ignore:
Timestamp:
Aug 15, 2013 12:57:02 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HM: Preemption hooks. Some common structural changes and cleanup, and initial implementation
of VT-x/AMD-V specific hook functionality. Work in progress.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r47652 r47760  
    8686    /** @name Ring-0 method table for AMD-V and VT-x specific operations.
    8787     * @{ */
    88     DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
    89     DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    90     DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
    91     DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    92     DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    93     DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
    94                                             bool fEnabledByHost));
    95     DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
    96     DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
    97     DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
    98     DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));
     88    DECLR0CALLBACKMEMBER(int,  pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
     89    DECLR0CALLBACKMEMBER(int,  pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     90    DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback,(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
     91    DECLR0CALLBACKMEMBER(int,  pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
     92    DECLR0CALLBACKMEMBER(int,  pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     93    DECLR0CALLBACKMEMBER(int,  pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     94    DECLR0CALLBACKMEMBER(int,  pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
     95                                             bool fEnabledByHost));
     96    DECLR0CALLBACKMEMBER(int,  pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     97    DECLR0CALLBACKMEMBER(int,  pfnInitVM,(PVM pVM));
     98    DECLR0CALLBACKMEMBER(int,  pfnTermVM,(PVM pVM));
     99    DECLR0CALLBACKMEMBER(int,  pfnSetupVM,(PVM pVM));
    99100    /** @} */
    100101
     
    251252    NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
    252253    return VINF_SUCCESS;
     254}
     255
     256static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
     257{
     258    NOREF(enmEvent); NOREF(pVCpu); NOREF(fGlobalInit);
    253259}
    254260
     
    517523                 * Install the VT-x methods.
    518524                 */
    519                 g_HvmR0.pfnEnterSession     = VMXR0Enter;
    520                 g_HvmR0.pfnLeaveSession     = VMXR0Leave;
    521                 g_HvmR0.pfnSaveHostState    = VMXR0SaveHostState;
    522                 g_HvmR0.pfnLoadGuestState   = VMXR0LoadGuestState;
    523                 g_HvmR0.pfnRunGuestCode     = VMXR0RunGuestCode;
    524                 g_HvmR0.pfnEnableCpu        = VMXR0EnableCpu;
    525                 g_HvmR0.pfnDisableCpu       = VMXR0DisableCpu;
    526                 g_HvmR0.pfnInitVM           = VMXR0InitVM;
    527                 g_HvmR0.pfnTermVM           = VMXR0TermVM;
    528                 g_HvmR0.pfnSetupVM          = VMXR0SetupVM;
     525                g_HvmR0.pfnEnterSession      = VMXR0Enter;
     526                g_HvmR0.pfnLeaveSession      = VMXR0Leave;
     527                g_HvmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
     528                g_HvmR0.pfnSaveHostState     = VMXR0SaveHostState;
     529                g_HvmR0.pfnLoadGuestState    = VMXR0LoadGuestState;
     530                g_HvmR0.pfnRunGuestCode      = VMXR0RunGuestCode;
     531                g_HvmR0.pfnEnableCpu         = VMXR0EnableCpu;
     532                g_HvmR0.pfnDisableCpu        = VMXR0DisableCpu;
     533                g_HvmR0.pfnInitVM            = VMXR0InitVM;
     534                g_HvmR0.pfnTermVM            = VMXR0TermVM;
     535                g_HvmR0.pfnSetupVM           = VMXR0SetupVM;
    529536
    530537                /*
     
    582589         * Install the AMD-V methods.
    583590         */
    584         g_HvmR0.pfnEnterSession     = SVMR0Enter;
    585         g_HvmR0.pfnLeaveSession     = SVMR0Leave;
    586         g_HvmR0.pfnSaveHostState    = SVMR0SaveHostState;
    587         g_HvmR0.pfnLoadGuestState   = SVMR0LoadGuestState;
    588         g_HvmR0.pfnRunGuestCode     = SVMR0RunGuestCode;
    589         g_HvmR0.pfnEnableCpu        = SVMR0EnableCpu;
    590         g_HvmR0.pfnDisableCpu       = SVMR0DisableCpu;
    591         g_HvmR0.pfnInitVM           = SVMR0InitVM;
    592         g_HvmR0.pfnTermVM           = SVMR0TermVM;
    593         g_HvmR0.pfnSetupVM          = SVMR0SetupVM;
     591        g_HvmR0.pfnEnterSession      = SVMR0Enter;
     592        g_HvmR0.pfnLeaveSession      = SVMR0Leave;
     593        g_HvmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
     594        g_HvmR0.pfnSaveHostState     = SVMR0SaveHostState;
     595        g_HvmR0.pfnLoadGuestState    = SVMR0LoadGuestState;
     596        g_HvmR0.pfnRunGuestCode      = SVMR0RunGuestCode;
     597        g_HvmR0.pfnEnableCpu         = SVMR0EnableCpu;
     598        g_HvmR0.pfnDisableCpu        = SVMR0DisableCpu;
     599        g_HvmR0.pfnInitVM            = SVMR0InitVM;
     600        g_HvmR0.pfnTermVM            = SVMR0TermVM;
     601        g_HvmR0.pfnSetupVM           = SVMR0SetupVM;
    594602
    595603        /* Query AMD features. */
     
    646654
    647655    /* Fill in all callbacks with placeholders. */
    648     g_HvmR0.pfnEnterSession     = hmR0DummyEnter;
    649     g_HvmR0.pfnLeaveSession     = hmR0DummyLeave;
    650     g_HvmR0.pfnSaveHostState    = hmR0DummySaveHostState;
    651     g_HvmR0.pfnLoadGuestState   = hmR0DummyLoadGuestState;
    652     g_HvmR0.pfnRunGuestCode     = hmR0DummyRunGuestCode;
    653     g_HvmR0.pfnEnableCpu        = hmR0DummyEnableCpu;
    654     g_HvmR0.pfnDisableCpu       = hmR0DummyDisableCpu;
    655     g_HvmR0.pfnInitVM           = hmR0DummyInitVM;
    656     g_HvmR0.pfnTermVM           = hmR0DummyTermVM;
    657     g_HvmR0.pfnSetupVM          = hmR0DummySetupVM;
     656    g_HvmR0.pfnEnterSession      = hmR0DummyEnter;
     657    g_HvmR0.pfnLeaveSession      = hmR0DummyLeave;
     658    g_HvmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback;
     659    g_HvmR0.pfnSaveHostState     = hmR0DummySaveHostState;
     660    g_HvmR0.pfnLoadGuestState    = hmR0DummyLoadGuestState;
     661    g_HvmR0.pfnRunGuestCode      = hmR0DummyRunGuestCode;
     662    g_HvmR0.pfnEnableCpu         = hmR0DummyEnableCpu;
     663    g_HvmR0.pfnDisableCpu        = hmR0DummyDisableCpu;
     664    g_HvmR0.pfnInitVM            = hmR0DummyInitVM;
     665    g_HvmR0.pfnTermVM            = hmR0DummyTermVM;
     666    g_HvmR0.pfnSetupVM           = hmR0DummySetupVM;
    658667
    659668    /* Default is global VT-x/AMD-V init. */
     
    912921    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    913922    Assert(!pCpu->fConfigured);
    914     Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    915923
    916924    pCpu->idCpu         = idCpu;
     
    10671075    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    10681076    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    1069     Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    10701077    Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
    10711078
     
    12961303    /*
    12971304     * Call the hardware specific initialization method.
    1298      *
    1299      * Note! The fInUse handling here isn't correct as we can we can be
    1300      *       rescheduled to a different cpu, but the fInUse case is mostly for
    1301      *       debugging...  Disabling preemption isn't an option when allocating
    1302      *       memory, so we'll let it slip for now.
    13031305     */
    13041306    RTCCUINTREG     fFlags = ASMIntDisableFlags();
    13051307    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
    1306     ASMAtomicWriteBool(&pCpu->fInUse, true);
    13071308    ASMSetFlags(fFlags);
    13081309
    13091310    int rc = g_HvmR0.pfnInitVM(pVM);
    1310 
    1311     ASMAtomicWriteBool(&pCpu->fInUse, false);
    13121311    return rc;
    13131312}
     
    13341333    /*
    13351334     * Call the hardware specific method.
    1336      *
    1337      * Note! Not correct as we can be rescheduled to a different cpu, but the
    1338      *       fInUse case is mostly for debugging.
    13391335     */
    13401336    RTCCUINTREG     fFlags = ASMIntDisableFlags();
    13411337    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
    1342     ASMAtomicWriteBool(&pCpu->fInUse, true);
    13431338    ASMSetFlags(fFlags);
    13441339
    13451340    int rc = g_HvmR0.pfnTermVM(pVM);
    1346 
    1347     ASMAtomicWriteBool(&pCpu->fInUse, false);
    13481341    return rc;
    13491342}
     
    13741367    RTCPUID         idCpu  = RTMpCpuId();
    13751368    PHMGLOBLCPUINFO pCpu   = &g_HvmR0.aCpuInfo[idCpu];
    1376     ASMAtomicWriteBool(&pCpu->fInUse, true);
    13771369
    13781370    /* On first entry we'll sync everything. */
     
    13981390    }
    13991391
    1400     ASMAtomicWriteBool(&pCpu->fInUse, false);
    14011392    ASMSetFlags(fFlags);
    1402 
    14031393    return rc;
     1394}
     1395
     1396
     1397/**
     1398 * Initializes the bare minimum state required for entering HM context.
     1399 *
     1400 * @param   pvCpu       Pointer to the VMCPU.
     1401 */
     1402VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu)
     1403{
     1404    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1405
     1406    RTCPUID          idCpu = RTMpCpuId();
     1407    PHMGLOBLCPUINFO   pCpu = &g_HvmR0.aCpuInfo[idCpu];
     1408    AssertPtr(pCpu);
     1409
     1410    pVCpu->hm.s.idEnteredCpu = idCpu;
     1411
     1412    /* Reload the host context and the guest's CR0 register for the FPU bits. */
     1413    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
     1414
     1415    /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
     1416    if (   !pCpu->fConfigured
     1417        || !g_HvmR0.fGlobalInit)
     1418    {
     1419        hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
     1420    }
    14041421}
    14051422
     
    14161433VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
    14171434{
     1435    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1436
     1437    /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
     1438    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
     1439
     1440    /* Load the bare minimum state required for entering HM. */
     1441    HMR0EnterEx(pVCpu);
     1442
     1443#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1444    AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_5);
     1445    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
     1446#endif
     1447
    14181448    RTCPUID         idCpu = RTMpCpuId();
    14191449    PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
    1420 
    1421     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
    1422     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    1423     ASMAtomicWriteBool(&pCpu->fInUse, true);
    1424 
    1425     AssertMsg(pVCpu->hm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hm.s.idEnteredCpu));
    1426     pVCpu->hm.s.idEnteredCpu = idCpu;
    1427 
    1428     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    1429 
    1430     /* Always load the guest's FPU/XMM state on-demand. */
    1431     CPUMDeactivateGuestFPUState(pVCpu);
    1432 
    1433     /* Always load the guest's debug state on-demand. */
    1434     CPUMDeactivateGuestDebugState(pVCpu);
    1435 
    1436     /* Always reload the host context and the guest's CR0 register for the FPU
    1437        bits (#NM, #MF, CR0.NE, CR0.TS, CR0.MP). */
    1438     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
    1439 
    1440     /* Enable VT-x or AMD-V if local init is required, or enable if it's a
    1441        freshly onlined CPU. */
    1442     int rc;
    1443     if (   !pCpu->fConfigured
    1444         || !g_HvmR0.fGlobalInit)
    1445     {
    1446         rc = hmR0EnableCpu(pVM, idCpu);
    1447         AssertRCReturn(rc, rc);
    1448     }
    1449 
    1450 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1451     bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
    1452 #endif
    1453 
    1454     rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
     1450    PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
     1451    Assert(pCpu);
     1452    Assert(pCtx);
     1453
     1454    int rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
    14551455    AssertRC(rc);
     1456
    14561457    /* We must save the host context here (VT-x) as we might be rescheduled on
    14571458       a different cpu after a long jump back to ring 3. */
     1459    /** @todo This will change with preemption hooks. */
    14581460    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
    14591461    AssertRC(rc);
     1462
    14601463    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
    14611464    AssertRC(rc);
     
    14751478
    14761479/**
     1480 * Deinitializes the bare minimum state used for HM context.
     1481 *
     1482 * @returns VBox status code.
     1483 * @param   pVCpu       Pointer to the VMCPU.
     1484 * @param   idCpu       The identifier for the CPU the function is called on.
     1485 */
     1486VMMR0_INT_DECL(int) HMR0LeaveEx(PVMCPU pVCpu)
     1487{
     1488    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1489
     1490    if (!g_HvmR0.fGlobalInit)
     1491    {
     1492        RTCPUID idCpu = RTMpCpuId();
     1493        int rc = hmR0DisableCpu(idCpu);
     1494        AssertRCReturn(rc, rc);
     1495    }
     1496
     1497    /* Reset these to force a TLB flush for the next entry. */
     1498    pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
     1499    pVCpu->hm.s.uCurrentAsid = 0;
     1500    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
     1501
     1502    return VINF_SUCCESS;
     1503}
     1504
     1505
     1506/**
    14771507 * Leaves the VT-x or AMD-V session.
    14781508 *
     
    14861516VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
    14871517{
    1488     int             rc;
    1489     RTCPUID         idCpu = RTMpCpuId();
    1490     PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
    1491     PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
    1492 
    14931518    /** @todo r=bird: This can't be entirely right? */
    14941519    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    14951520
     1521    PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
     1522    AssertPtr(pCtx);
     1523
     1524    int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
     1525
    14961526    /*
    1497      * Save the guest FPU and XMM state if necessary.
    1498      *
    1499      * Note! It's rather tricky with longjmps done by e.g. Log statements or
    1500      *       the page fault handler.  We must restore the host FPU here to make
    1501      *       absolutely sure we don't leave the guest FPU state active or trash
    1502      *       somebody else's FPU state.
     1527     * When thread-context hooks are not used, leave HM context and if necessary disable HM on the CPU.
     1528     * When thread-context hooks -are- used, this work would be done in the VT-x and AMD-V thread-context callback.
    15031529     */
    1504     if (CPUMIsGuestFPUStateActive(pVCpu))
    1505     {
    1506         Log2(("CPUMR0SaveGuestFPU\n"));
    1507         CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
    1508 
    1509         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    1510     }
    1511 
    1512     rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
    1513 
    1514     /* We don't pass on invlpg information to the recompiler for nested paging
    1515        guests, so we must make sure the recompiler flushes its TLB the next
    1516        time it executes code. */
    1517     if (   pVM->hm.s.fNestedPaging
    1518         && CPUMIsGuestPagingEnabledEx(pCtx))
    1519     {
    1520         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    1521     }
    1522 
    1523     /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
    1524        and ring-3 calls. */
    1525     AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
    1526                   || RT_FAILURE_NP(rc),
    1527                   ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
    1528                   rc = VERR_HM_WRONG_CPU_1);
     1530    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     1531    {
     1532        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1533        RTCPUID idCpu = RTMpCpuId();
     1534
     1535        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
     1536           and ring-3 calls when thread-context hooks are not supported. */
     1537        AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
     1538                      || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
     1539                      rc = VERR_HM_WRONG_CPU_1);
     1540
     1541        rc = HMR0LeaveEx(pVCpu);
     1542        AssertRCReturn(rc, rc);
     1543    }
     1544
     1545    /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
     1546    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     1547    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     1548
    15291549    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
    1530 
    1531     /*
    1532      * Disable VT-x or AMD-V if local init was done before.
    1533      */
    1534     if (!g_HvmR0.fGlobalInit)
    1535     {
    1536         rc = hmR0DisableCpu(idCpu);
    1537         AssertRC(rc);
    1538 
    1539         /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
    1540         pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
    1541         pVCpu->hm.s.uCurrentAsid = 0;
    1542         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    1543     }
    1544 
    1545     ASMAtomicWriteBool(&pCpu->fInUse, false);
    15461550    return rc;
     1551}
     1552
     1553
     1554/**
     1555 * Thread-context hook for HM.
     1556 *
     1557 * @param   enmEvent        The thread-context event.
     1558 * @param   pvUser          Opaque pointer to the VMCPU.
     1559 */
     1560VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
     1561{
     1562    PVMCPU pVCpu = (PVMCPU)pvUser;
     1563    Assert(pVCpu);
     1564    Assert(g_HvmR0.pfnThreadCtxCallback);
     1565
     1566    g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit);
    15471567}
    15481568
     
    15651585    Assert(pCpu->fConfigured);
    15661586    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    1567     Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
    15681587#endif
    15691588
    15701589#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1590    AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_4);
    15711591    PGMRZDynMapStartAutoSet(pVCpu);
    15721592#endif
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette