VirtualBox

Changeset 47760 in vbox for trunk/src


Timestamp:
Aug 15, 2013 12:57:02 PM
Author:
vboxsync
Message:

VMM/HM: Preemption hooks. Some common structural changes and cleanup, and initial implementation
of VT-x/AMD-V specific hook functionality. Work in progress.

Location:
trunk/src/VBox/VMM
Files:
8 edited
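
For orientation, here is the dispatch chain this changeset sets up, sketched in a few lines of C. The names and signatures come from the hunks below; the registration call site is an assumption, since wiring it into VMMR0EntryFast is still left as a todo in this revision:

    /* Assumed future call site (NOT in r47760, see the @todo in VMMR0.cpp):
       the EMT registers the generic HM dispatcher as its thread-context hook. */
    int rc = VMMR0ThreadCtxHooksRegister(pVCpu, HMR0ThreadCtxCallback);
    AssertRC(rc);

    /* IPRT then fires the hook on scheduler events; HMR0ThreadCtxCallback()
       fans the event out to VT-x or AMD-V through the method table:
         RTTHREADCTXEVENT_PREEMPTING -> save guest state, leave HM context;
         RTTHREADCTXEVENT_RESUMED    -> re-enable HM on the new CPU, reload. */
    g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit);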

  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r47652 r47760  
    8686    /** @name Ring-0 method table for AMD-V and VT-x specific operations.
    8787     * @{ */
    88     DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
    89     DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    90     DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
    91     DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    92     DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
    93     DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
    94                                             bool fEnabledByHost));
    95     DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
    96     DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
    97     DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
    98     DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));
     88    DECLR0CALLBACKMEMBER(int,  pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
     89    DECLR0CALLBACKMEMBER(int,  pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     90    DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback,(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
     91    DECLR0CALLBACKMEMBER(int,  pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
     92    DECLR0CALLBACKMEMBER(int,  pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     93    DECLR0CALLBACKMEMBER(int,  pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     94    DECLR0CALLBACKMEMBER(int,  pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
     95                                             bool fEnabledByHost));
     96    DECLR0CALLBACKMEMBER(int,  pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     97    DECLR0CALLBACKMEMBER(int,  pfnInitVM,(PVM pVM));
     98    DECLR0CALLBACKMEMBER(int,  pfnTermVM,(PVM pVM));
     99    DECLR0CALLBACKMEMBER(int,  pfnSetupVM,(PVM pVM));
    99100    /** @} */
    100101
     
    251252    NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
    252253    return VINF_SUCCESS;
     254}
     255
     256static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
     257{
     258    NOREF(enmEvent); NOREF(pVCpu); NOREF(fGlobalInit);
    253259}
    254260
     
    517523                 * Install the VT-x methods.
    518524                 */
    519                 g_HvmR0.pfnEnterSession     = VMXR0Enter;
    520                 g_HvmR0.pfnLeaveSession     = VMXR0Leave;
    521                 g_HvmR0.pfnSaveHostState    = VMXR0SaveHostState;
    522                 g_HvmR0.pfnLoadGuestState   = VMXR0LoadGuestState;
    523                 g_HvmR0.pfnRunGuestCode     = VMXR0RunGuestCode;
    524                 g_HvmR0.pfnEnableCpu        = VMXR0EnableCpu;
    525                 g_HvmR0.pfnDisableCpu       = VMXR0DisableCpu;
    526                 g_HvmR0.pfnInitVM           = VMXR0InitVM;
    527                 g_HvmR0.pfnTermVM           = VMXR0TermVM;
    528                 g_HvmR0.pfnSetupVM          = VMXR0SetupVM;
     525                g_HvmR0.pfnEnterSession      = VMXR0Enter;
     526                g_HvmR0.pfnLeaveSession      = VMXR0Leave;
     527                g_HvmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback;
     528                g_HvmR0.pfnSaveHostState     = VMXR0SaveHostState;
     529                g_HvmR0.pfnLoadGuestState    = VMXR0LoadGuestState;
     530                g_HvmR0.pfnRunGuestCode      = VMXR0RunGuestCode;
     531                g_HvmR0.pfnEnableCpu         = VMXR0EnableCpu;
     532                g_HvmR0.pfnDisableCpu        = VMXR0DisableCpu;
     533                g_HvmR0.pfnInitVM            = VMXR0InitVM;
     534                g_HvmR0.pfnTermVM            = VMXR0TermVM;
     535                g_HvmR0.pfnSetupVM           = VMXR0SetupVM;
    529536
    530537                /*
     
    582589         * Install the AMD-V methods.
    583590         */
    584         g_HvmR0.pfnEnterSession     = SVMR0Enter;
    585         g_HvmR0.pfnLeaveSession     = SVMR0Leave;
    586         g_HvmR0.pfnSaveHostState    = SVMR0SaveHostState;
    587         g_HvmR0.pfnLoadGuestState   = SVMR0LoadGuestState;
    588         g_HvmR0.pfnRunGuestCode     = SVMR0RunGuestCode;
    589         g_HvmR0.pfnEnableCpu        = SVMR0EnableCpu;
    590         g_HvmR0.pfnDisableCpu       = SVMR0DisableCpu;
    591         g_HvmR0.pfnInitVM           = SVMR0InitVM;
    592         g_HvmR0.pfnTermVM           = SVMR0TermVM;
    593         g_HvmR0.pfnSetupVM          = SVMR0SetupVM;
     591        g_HvmR0.pfnEnterSession      = SVMR0Enter;
     592        g_HvmR0.pfnLeaveSession      = SVMR0Leave;
     593        g_HvmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback;
     594        g_HvmR0.pfnSaveHostState     = SVMR0SaveHostState;
     595        g_HvmR0.pfnLoadGuestState    = SVMR0LoadGuestState;
     596        g_HvmR0.pfnRunGuestCode      = SVMR0RunGuestCode;
     597        g_HvmR0.pfnEnableCpu         = SVMR0EnableCpu;
     598        g_HvmR0.pfnDisableCpu        = SVMR0DisableCpu;
     599        g_HvmR0.pfnInitVM            = SVMR0InitVM;
     600        g_HvmR0.pfnTermVM            = SVMR0TermVM;
     601        g_HvmR0.pfnSetupVM           = SVMR0SetupVM;
    594602
    595603        /* Query AMD features. */
     
    646654
    647655    /* Fill in all callbacks with placeholders. */
    648     g_HvmR0.pfnEnterSession     = hmR0DummyEnter;
    649     g_HvmR0.pfnLeaveSession     = hmR0DummyLeave;
    650     g_HvmR0.pfnSaveHostState    = hmR0DummySaveHostState;
    651     g_HvmR0.pfnLoadGuestState   = hmR0DummyLoadGuestState;
    652     g_HvmR0.pfnRunGuestCode     = hmR0DummyRunGuestCode;
    653     g_HvmR0.pfnEnableCpu        = hmR0DummyEnableCpu;
    654     g_HvmR0.pfnDisableCpu       = hmR0DummyDisableCpu;
    655     g_HvmR0.pfnInitVM           = hmR0DummyInitVM;
    656     g_HvmR0.pfnTermVM           = hmR0DummyTermVM;
    657     g_HvmR0.pfnSetupVM          = hmR0DummySetupVM;
     656    g_HvmR0.pfnEnterSession      = hmR0DummyEnter;
     657    g_HvmR0.pfnLeaveSession      = hmR0DummyLeave;
     658    g_HvmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback;
     659    g_HvmR0.pfnSaveHostState     = hmR0DummySaveHostState;
     660    g_HvmR0.pfnLoadGuestState    = hmR0DummyLoadGuestState;
     661    g_HvmR0.pfnRunGuestCode      = hmR0DummyRunGuestCode;
     662    g_HvmR0.pfnEnableCpu         = hmR0DummyEnableCpu;
     663    g_HvmR0.pfnDisableCpu        = hmR0DummyDisableCpu;
     664    g_HvmR0.pfnInitVM            = hmR0DummyInitVM;
     665    g_HvmR0.pfnTermVM            = hmR0DummyTermVM;
     666    g_HvmR0.pfnSetupVM           = hmR0DummySetupVM;
    658667
    659668    /* Default is global VT-x/AMD-V init. */
     
    912921    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    913922    Assert(!pCpu->fConfigured);
    914     Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    915923
    916924    pCpu->idCpu         = idCpu;
     
    10671075    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    10681076    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    1069     Assert(!g_HvmR0.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
    10701077    Assert(!pCpu->fConfigured || pCpu->hMemObj != NIL_RTR0MEMOBJ);
    10711078
     
    12961303    /*
    12971304     * Call the hardware specific initialization method.
    1298      *
    1299      * Note! The fInUse handling here isn't correct as we can we can be
    1300      *       rescheduled to a different cpu, but the fInUse case is mostly for
    1301      *       debugging...  Disabling preemption isn't an option when allocating
    1302      *       memory, so we'll let it slip for now.
    13031305     */
    13041306    RTCCUINTREG     fFlags = ASMIntDisableFlags();
    13051307    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
    1306     ASMAtomicWriteBool(&pCpu->fInUse, true);
    13071308    ASMSetFlags(fFlags);
    13081309
    13091310    int rc = g_HvmR0.pfnInitVM(pVM);
    1310 
    1311     ASMAtomicWriteBool(&pCpu->fInUse, false);
    13121311    return rc;
    13131312}
     
    13341333    /*
    13351334     * Call the hardware specific method.
    1336      *
    1337      * Note! Not correct as we can be rescheduled to a different cpu, but the
    1338      *       fInUse case is mostly for debugging.
    13391335     */
    13401336    RTCCUINTREG     fFlags = ASMIntDisableFlags();
    13411337    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
    1342     ASMAtomicWriteBool(&pCpu->fInUse, true);
    13431338    ASMSetFlags(fFlags);
    13441339
    13451340    int rc = g_HvmR0.pfnTermVM(pVM);
    1346 
    1347     ASMAtomicWriteBool(&pCpu->fInUse, false);
    13481341    return rc;
    13491342}
     
    13741367    RTCPUID         idCpu  = RTMpCpuId();
    13751368    PHMGLOBLCPUINFO pCpu   = &g_HvmR0.aCpuInfo[idCpu];
    1376     ASMAtomicWriteBool(&pCpu->fInUse, true);
    13771369
    13781370    /* On first entry we'll sync everything. */
     
    13981390    }
    13991391
    1400     ASMAtomicWriteBool(&pCpu->fInUse, false);
    14011392    ASMSetFlags(fFlags);
    1402 
    14031393    return rc;
     1394}
     1395
     1396
     1397/**
     1398 * Initializes the bare minimum state required for entering HM context.
     1399 *
     1400 * @param   pvCpu       Pointer to the VMCPU.
     1401 */
     1402VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu)
     1403{
     1404    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1405
     1406    RTCPUID          idCpu = RTMpCpuId();
     1407    PHMGLOBLCPUINFO   pCpu = &g_HvmR0.aCpuInfo[idCpu];
     1408    AssertPtr(pCpu);
     1409
     1410    pVCpu->hm.s.idEnteredCpu = idCpu;
     1411
     1412    /* Reload the host context and the guest's CR0 register for the FPU bits. */
     1413    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
     1414
     1415    /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
     1416    if (   !pCpu->fConfigured
     1417        || !g_HvmR0.fGlobalInit)
     1418    {
     1419        hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
     1420    }
    14041421}
    14051422
     
    14161433VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
    14171434{
     1435    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1436
     1437    /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
     1438    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
     1439
     1440    /* Load the bare minimum state required for entering HM. */
     1441    HMR0EnterEx(pVCpu);
     1442
     1443#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1444    AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_5);
     1445    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
     1446#endif
     1447
    14181448    RTCPUID         idCpu = RTMpCpuId();
    14191449    PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
    1420 
    1421     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
    1422     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    1423     ASMAtomicWriteBool(&pCpu->fInUse, true);
    1424 
    1425     AssertMsg(pVCpu->hm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hm.s.idEnteredCpu));
    1426     pVCpu->hm.s.idEnteredCpu = idCpu;
    1427 
    1428     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    1429 
    1430     /* Always load the guest's FPU/XMM state on-demand. */
    1431     CPUMDeactivateGuestFPUState(pVCpu);
    1432 
    1433     /* Always load the guest's debug state on-demand. */
    1434     CPUMDeactivateGuestDebugState(pVCpu);
    1435 
    1436     /* Always reload the host context and the guest's CR0 register for the FPU
    1437        bits (#NM, #MF, CR0.NE, CR0.TS, CR0.MP). */
    1438     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;
    1439 
    1440     /* Enable VT-x or AMD-V if local init is required, or enable if it's a
    1441        freshly onlined CPU. */
    1442     int rc;
    1443     if (   !pCpu->fConfigured
    1444         || !g_HvmR0.fGlobalInit)
    1445     {
    1446         rc = hmR0EnableCpu(pVM, idCpu);
    1447         AssertRCReturn(rc, rc);
    1448     }
    1449 
    1450 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1451     bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
    1452 #endif
    1453 
    1454     rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
     1450    PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
     1451    Assert(pCpu);
     1452    Assert(pCtx);
     1453
     1454    int rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
    14551455    AssertRC(rc);
     1456
    14561457    /* We must save the host context here (VT-x) as we might be rescheduled on
    14571458       a different cpu after a long jump back to ring 3. */
     1459    /** @todo This will change with preemption hooks. */
    14581460    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
    14591461    AssertRC(rc);
     1462
    14601463    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
    14611464    AssertRC(rc);
     
    14751478
    14761479/**
     1480 * Deinitializes the bare minimum state used for HM context.
     1481 *
     1482 * @returns VBox status code.
     1483 * @param   pVCpu       Pointer to the VMCPU.
     1484 * @param   idCpu       The identifier for the CPU the function is called on.
     1485 */
     1486VMMR0_INT_DECL(int) HMR0LeaveEx(PVMCPU pVCpu)
     1487{
     1488    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1489
     1490    if (!g_HvmR0.fGlobalInit)
     1491    {
     1492        RTCPUID idCpu = RTMpCpuId();
     1493        int rc = hmR0DisableCpu(idCpu);
     1494        AssertRCReturn(rc, rc);
     1495    }
     1496
     1497    /* Reset these to force a TLB flush for the next entry. */
     1498    pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
     1499    pVCpu->hm.s.uCurrentAsid = 0;
     1500    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
     1501
     1502    return VINF_SUCCESS;
     1503}
     1504
     1505
     1506/**
    14771507 * Leaves the VT-x or AMD-V session.
    14781508 *
     
    14861516VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
    14871517{
    1488     int             rc;
    1489     RTCPUID         idCpu = RTMpCpuId();
    1490     PHMGLOBLCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
    1491     PCPUMCTX        pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
    1492 
    14931518    /** @todo r=bird: This can't be entirely right? */
    14941519    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    14951520
     1521    PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
     1522    AssertPtr(pCtx);
     1523
     1524    int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
     1525
    14961526    /*
    1497      * Save the guest FPU and XMM state if necessary.
    1498      *
    1499      * Note! It's rather tricky with longjmps done by e.g. Log statements or
    1500      *       the page fault handler.  We must restore the host FPU here to make
    1501      *       absolutely sure we don't leave the guest FPU state active or trash
    1502      *       somebody else's FPU state.
     1527     * When thread-context hooks are not used, leave HM context and if necessary disable HM on the CPU.
     1528     * When thread-context hooks -are- used, this work would be done in the VT-x and AMD-V thread-context callback.
    15031529     */
    1504     if (CPUMIsGuestFPUStateActive(pVCpu))
    1505     {
    1506         Log2(("CPUMR0SaveGuestFPU\n"));
    1507         CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
    1508 
    1509         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    1510     }
    1511 
    1512     rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
    1513 
    1514     /* We don't pass on invlpg information to the recompiler for nested paging
    1515        guests, so we must make sure the recompiler flushes its TLB the next
    1516        time it executes code. */
    1517     if (   pVM->hm.s.fNestedPaging
    1518         && CPUMIsGuestPagingEnabledEx(pCtx))
    1519     {
    1520         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    1521     }
    1522 
    1523     /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
    1524        and ring-3 calls. */
    1525     AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
    1526                   || RT_FAILURE_NP(rc),
    1527                   ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
    1528                   rc = VERR_HM_WRONG_CPU_1);
     1530    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     1531    {
     1532        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1533        RTCPUID idCpu = RTMpCpuId();
     1534
     1535        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
     1536           and ring-3 calls when thread-context hooks are not supported. */
     1537        AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
     1538                      || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
     1539                      rc = VERR_HM_WRONG_CPU_1);
     1540
     1541        rc = HMR0LeaveEx(pVCpu);
     1542        AssertRCReturn(rc, rc);
     1543    }
     1544
     1545    /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
     1546    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     1547    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     1548
    15291549    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
    1530 
    1531     /*
    1532      * Disable VT-x or AMD-V if local init was done before.
    1533      */
    1534     if (!g_HvmR0.fGlobalInit)
    1535     {
    1536         rc = hmR0DisableCpu(idCpu);
    1537         AssertRC(rc);
    1538 
    1539         /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
    1540         pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
    1541         pVCpu->hm.s.uCurrentAsid = 0;
    1542         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    1543     }
    1544 
    1545     ASMAtomicWriteBool(&pCpu->fInUse, false);
    15461550    return rc;
     1551}
     1552
     1553
     1554/**
     1555 * Thread-context hook for HM.
     1556 *
     1557 * @param   enmEvent        The thread-context event.
     1558 * @param   pvUser          Opaque pointer to the VMCPU.
     1559 */
     1560VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
     1561{
     1562    PVMCPU pVCpu = (PVMCPU)pvUser;
     1563    Assert(pVCpu);
     1564    Assert(g_HvmR0.pfnThreadCtxCallback);
     1565
     1566    g_HvmR0.pfnThreadCtxCallback(enmEvent, pVCpu, g_HvmR0.fGlobalInit);
    15471567}
    15481568
     
    15651585    Assert(pCpu->fConfigured);
    15661586    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    1567     Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
    15681587#endif
    15691588
    15701589#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     1590    AssertReturn(!VMMR0ThreadCtxHooksAreRegistered(pVCpu), VERR_HM_IPE_4);
    15711591    PGMRZDynMapStartAutoSet(pVCpu);
    15721592#endif
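
Taken together, these HMR0.cpp hunks split the enter/leave paths so the thread-context callbacks can reuse them. A condensed view of the resulting bracket (condensed from the hunks above, not verbatim source):

    /* Enter: the bare-minimum per-CPU setup now lives in HMR0EnterEx(),
       which a RTTHREADCTXEVENT_RESUMED handler can also call on its own. */
    HMR0EnterEx(pVCpu);                               /* idEnteredCpu, HM_CHANGED_* flags, hmR0EnableCpu() for local init */
    rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);  /* VMXR0Enter / SVMR0Enter */
    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);

    /* ... g_HvmR0.pfnRunGuestCode(pVM, pVCpu, pCtx) ... */

    /* Leave: HMR0Leave() now performs the CPU-local teardown itself only
       when no thread-context hooks are registered; with hooks, that work
       is done by the PREEMPTING callback via HMR0LeaveEx(). */
    rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
        rc = HMR0LeaveEx(pVCpu);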
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r47652 r47760  
    751751; * @param   HCPhysVmcs     Physical address of VM control structure
    752752; */
    753 ;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVmcs);
    754 ALIGNCODE(16)
    755 BEGINPROC VMXClearVMCS
     753;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
     754ALIGNCODE(16)
     755BEGINPROC VMXClearVmcs
    756756%ifdef RT_ARCH_AMD64
    757757    xor     rax, rax
     
    796796BITS 32
    797797%endif
    798 ENDPROC VMXClearVMCS
     798ENDPROC VMXClearVmcs
    799799
    800800
     
    805805; * @param   HCPhysVmcs     Physical address of VMCS structure
    806806; */
    807 ;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVmcs);
    808 ALIGNCODE(16)
    809 BEGINPROC VMXActivateVMCS
     807;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
     808ALIGNCODE(16)
     809BEGINPROC VMXActivateVmcs
    810810%ifdef RT_ARCH_AMD64
    811811    xor     rax, rax
     
    850850BITS 32
    851851%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    852 ENDPROC VMXActivateVMCS
     852ENDPROC VMXActivateVmcs
    853853
    854854
     
    859859; * @param    [esp + 04h]  gcc:rdi  msc:rcx   Param 1 - First parameter - Address that will receive the current pointer
    860860; */
    861 ;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
    862 BEGINPROC VMXGetActivateVMCS
     861;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
     862BEGINPROC VMXGetActivatedVmcs
    863863%ifdef RT_OS_OS2
    864864    mov     eax, VERR_NOT_SUPPORTED
     
    899899 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    900900%endif
    901 ENDPROC VMXGetActivateVMCS
     901ENDPROC VMXGetActivatedVmcs
    902902
    903903;/**
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r47718 r47760  
    239239static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
    240240static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
     241static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    241242
    242243HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
     
    15811582
    15821583/**
     1584 * Thread-context callback for AMD-V.
     1585 *
     1586 * @param   enmEvent        The thread-context event.
     1587 * @param   pVCpu           Pointer to the VMCPU.
     1588 * @param   fGlobalInit     Whether global VT-x/AMD-V init. is used.
     1589 */
     1590VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
     1591{
     1592    switch (enmEvent)
     1593    {
     1594        case RTTHREADCTXEVENT_PREEMPTING:
     1595        {
     1596            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1597
     1598            PVM         pVM  = pVCpu->CTX_SUFF(pVM);
     1599            PCPUMCTX    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     1600            VMMRZCallRing3Disable(pVCpu);                        /* No longjmps (log-flush, locks) in this fragile context. */
     1601
     1602            hmR0SvmLeave(pVM, pVCpu, pCtx);
     1603
     1604            int rc = HMR0LeaveEx(pVCpu);                         /* Leave HM context, takes care of local init (term). */
     1605            AssertRC(rc); NOREF(rc);
     1606
     1607            VMMRZCallRing3Enable(pVCpu);                         /* Restore longjmp state. */
     1608            break;
     1609        }
     1610
     1611        case RTTHREADCTXEVENT_RESUMED:
     1612        {
     1613            /* Disable preemption, we don't want to be migrated to another CPU while re-initializing AMD-V state. */
     1614            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     1615            RTThreadPreemptDisable(&PreemptState);
     1616
     1617            /* Initialize the bare minimum state required for HM. This takes care of
     1618               initializing AMD-V if necessary (onlined CPUs, local init etc.) */
     1619            HMR0EnterEx(pVCpu);
     1620
     1621            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
     1622
     1623            RTThreadPreemptRestore(&PreemptState);
     1624            break;
     1625        }
     1626
     1627        default:
     1628            break;
     1629    }
     1630}
     1631
     1632
     1633/**
    15831634 * Saves the host state.
    15841635 *
     
    18221873
    18231874/**
     1875 * Does the necessary state syncing before returning to ring-3 for any reason
     1876 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
     1877 *
     1878 * @param   pVM         Pointer to the VM.
     1879 * @param   pVCpu       Pointer to the VMCPU.
     1880 * @param   pMixedCtx   Pointer to the guest-CPU context.
     1881 *
     1882 * @remarks No-long-jmp zone!!!
     1883 */
     1884static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1885{
     1886    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     1887    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     1888
     1889    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
     1890    if (CPUMIsGuestFPUStateActive(pVCpu))
     1891    {
     1892        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
     1893        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     1894        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     1895    }
     1896
     1897    /*
     1898     * Restore host debug registers if necessary and resync on next R0 reentry.
     1899     */
     1900#ifdef VBOX_STRICT
     1901    if (CPUMIsHyperDebugStateActive(pVCpu))
     1902    {
     1903        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1904        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
     1905        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
     1906    }
     1907#endif
     1908    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
     1909        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     1910
     1911    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     1912    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     1913
     1914
     1915    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
     1916    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
     1917    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
     1918    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
     1919    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     1920
     1921    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
     1922}
     1923
     1924
     1925/**
    18241926 * Does the necessary state syncing before doing a longjmp to ring-3.
     1927 *
     1928 * @param   pVM         Pointer to the VM.
     1929 * @param   pVCpu       Pointer to the VMCPU.
     1930 * @param   pCtx        Pointer to the guest-CPU context.
     1931 *
     1932 * @remarks No-long-jmp zone!!!
     1933 */
     1934static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1935{
     1936    hmR0SvmLeave(pVM, pVCpu, pCtx);
     1937}
     1938
     1939
     1940/**
     1941 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
     1942 * any remaining host state) before we longjump to ring-3 and possibly get
     1943 * preempted.
     1944 *
     1945 * @param   pVCpu           Pointer to the VMCPU.
     1946 * @param   enmOperation    The operation causing the ring-3 longjump.
     1947 * @param   pvUser          The user argument (pointer to the possibly
     1948 *                          out-of-date guest-CPU context).
     1949 *
     1950 * @remarks Must never be called with @a enmOperation ==
     1951 *          VMMCALLRING3_VM_R0_ASSERTION.
     1952 */
     1953DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
     1954{
     1955    /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion, */
     1956    Assert(pVCpu);
     1957    Assert(pvUser);
     1958    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     1959    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1960
     1961    VMMRZCallRing3Disable(pVCpu);
     1962    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     1963
     1964    Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
     1965    hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
     1966
     1967    VMMRZCallRing3Enable(pVCpu);
     1968}
     1969
     1970
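
With hmR0SvmLongJmpToRing3() reduced to a thin wrapper around the new hmR0SvmLeave(), every way off the CPU on AMD-V now funnels through one state-sync routine. A schematic of the call graph after this changeset (annotation, not code from the tree):

    /* SVMR0ThreadCtxCallback(PREEMPTING)    --> hmR0SvmLeave() + HMR0LeaveEx()
       hmR0SvmCallRing3Callback (longjmp)    --> hmR0SvmLongJmpToRing3() --> hmR0SvmLeave()
       hmR0SvmExitToRing3 (voluntary, below) --> hmR0SvmLeave()                          */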
     1971/**
     1972 * Take necessary actions before going back to ring-3.
     1973 *
     1974 * An action requires us to go back to ring-3. This function does the necessary
     1975 * steps before we can safely return to ring-3. This is not the same as longjmps
     1976 * to ring-3, this is voluntary.
    18251977 *
    18261978 * @param   pVM         Pointer to the VM.
     
    18291981 * @param   rcExit      The reason for exiting to ring-3. Can be
    18301982 *                      VINF_VMM_UNKNOWN_RING3_CALL.
    1831  *
    1832  * @remarks No-long-jmp zone!!!
    1833  */
    1834 static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
    1835 {
    1836     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    1837     Assert(VMMR0IsLogFlushDisabled(pVCpu));
    1838 
    1839     /* Restore host FPU state if necessary and resync on next R0 reentry .*/
    1840     if (CPUMIsGuestFPUStateActive(pVCpu))
    1841     {
    1842         CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
    1843         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    1844         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    1845     }
    1846 
    1847     /*
    1848      * Restore host debug registers if necessary and resync on next R0 reentry.
    1849      */
    1850 #ifdef VBOX_STRICT
    1851     if (CPUMIsHyperDebugStateActive(pVCpu))
    1852     {
    1853         PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    1854         Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
    1855         Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
    1856     }
    1857 #endif
    1858     if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
    1859         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    1860     Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    1861     Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    1862 
    1863     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
    1864     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
    1865     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
    1866     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
    1867     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
    1868 
    1869     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    1870 }
    1871 
    1872 
    1873 /**
    1874  * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
    1875  * any remaining host state) before we longjump to ring-3 and possibly get
    1876  * preempted.
    1877  *
    1878  * @param   pVCpu           Pointer to the VMCPU.
    1879  * @param   enmOperation    The operation causing the ring-3 longjump.
    1880  * @param   pvUser          The user argument (pointer to the possibly
    1881  *                          out-of-date guest-CPU context).
    1882  *
    1883  * @remarks Must never be called with @a enmOperation ==
    1884  *          VMMCALLRING3_VM_R0_ASSERTION.
    1885  */
    1886 DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
    1887 {
    1888     /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion, */
    1889     Assert(pVCpu);
    1890     Assert(pvUser);
    1891     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    1892     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1893 
    1894     VMMRZCallRing3Disable(pVCpu);
    1895     Assert(VMMR0IsLogFlushDisabled(pVCpu));
    1896     Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
    1897     hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
    1898     VMMRZCallRing3Enable(pVCpu);
    1899 }
    1900 
    1901 
    1902 /**
    1903  * Take necessary actions before going back to ring-3.
    1904  *
    1905  * An action requires us to go back to ring-3. This function does the necessary
    1906  * steps before we can safely return to ring-3. This is not the same as longjmps
    1907  * to ring-3, this is voluntary.
    1908  *
    1909  * @param   pVM         Pointer to the VM.
    1910  * @param   pVCpu       Pointer to the VMCPU.
    1911  * @param   pCtx        Pointer to the guest-CPU context.
    1912  * @param   rcExit      The reason for exiting to ring-3. Can be
    1913  *                      VINF_VMM_UNKNOWN_RING3_CALL.
    19141983 */
    19151984static void hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
     
    19372006    }
    19382007
    1939     /* Sync. the guest state. */
    1940     hmR0SvmLongJmpToRing3(pVM, pVCpu, pCtx, rcExit);
     2008    /* Sync. the necessary state for going back to ring-3. */
     2009    hmR0SvmLeave(pVM, pVCpu, pCtx);
    19412010    STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
    19422011
     
    19482017                              | CPUM_CHANGED_TR
    19492018                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
     2019    if (   pVM->hm.s.fNestedPaging
     2020        && CPUMIsGuestPagingEnabledEx(pCtx))
     2021    {
     2022        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
     2023    }
     2024
     2025    /* Make sure we've undo the trap flag if we tried to single step something. */
     2026    if (pVCpu->hm.s.fClearTrapFlag)
     2027    {
     2028        pCtx->eflags.Bits.u1TF = 0;
     2029        pVCpu->hm.s.fClearTrapFlag = false;
     2030    }
    19502031
    19512032    /* On our way back from ring-3 the following needs to be done. */
     
    19552036    else
    19562037        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
    1957 
    1958     /* Make sure we've undo the trap flag if we tried to single step something. */
    1959     if (pVCpu->hm.s.fClearTrapFlag)
    1960     {
    1961         pVCpu->hm.s.fClearTrapFlag = false;
    1962         pCtx->eflags.Bits.u1TF = 0;
    1963     }
    19642038
    19652039    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

    r47473 r47760  
    55
    66/*
    7  * Copyright (C) 2006-2012 Oracle Corporation
     7 * Copyright (C) 2006-2013 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    4242VMMR0DECL(int)  SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
    4343VMMR0DECL(int)  SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     44VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    4445VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem);
    4546VMMR0DECL(int)  SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     
    9697RT_C_DECLS_END
    9798
    98 #endif /* HMSVMR0_h */
     99#endif /* ___HMSVMR0_h */
    99100
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r47747 r47760  
    21092109
    21102110        /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
    2111         rc  = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
    2112         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
     2111        rc  = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     2112        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    21132113                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    21142114
    21152115        /* Load this VMCS as the current VMCS. */
    2116         rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
    2117         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
     2116        rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     2117        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    21182118                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    21192119
     
    21452145
    21462146        /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
    2147         rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
    2148         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
     2147        rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     2148        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    21492149                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    21502150
     
    43484348
    43494349    /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    4350     VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
     4350    VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    43514351
    43524352    /* Leave VMX Root Mode. */
     
    43794379    }
    43804380
    4381     rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
     4381    rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    43824382    AssertRC(rc2);
    43834383    Assert(!(ASMGetFlags() & X86_EFL_IF));
     
    59715971           enmTrapType = TRPM_HARDWARE_INT;
    59725972           break;
     5973
    59735974        case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
    59745975            enmTrapType = TRPM_SOFTWARE_INT;
    59755976            break;
     5977
    59765978        case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
    59775979        case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:      /* #BP and #OF */
     
    59795981            enmTrapType = TRPM_TRAP;
    59805982            break;
     5983
    59815984        default:
    59825985            AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
     
    60276030    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    60286031    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     6032
     6033    /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
     6034       which would've done this work from the VMXR0ThreadCtxCallback(). */
     6035    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     6036    bool fPreemptDisabled = false;
     6037    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     6038    {
     6039        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     6040        RTThreadPreemptDisable(&PreemptState);
     6041        fPreemptDisabled = true;
     6042        if (pVCpu->hm.s.vmx.fVmxLeaveDone)
     6043        {
     6044            RTThreadPreemptRestore(&PreemptState);
     6045            return;
     6046        }
     6047    }
    60296048
    60306049    /* Save the guest state if necessary. */
     
    60546073    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    60556074
     6075    /* Restore host-state bits that VT-x only restores partially. */
     6076    if (pVCpu->hm.s.vmx.fRestoreHostFlags)
     6077    {
     6078        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
     6079        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     6080    }
     6081
    60566082    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
    60576083    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
     
    60646090
    60656091    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
     6092
     6093    /* Restore preemption if we previous disabled it ourselves. */
     6094    if (fPreemptDisabled)
     6095    {
     6096        pVCpu->hm.s.vmx.fVmxLeaveDone = true;
     6097        RTThreadPreemptRestore(&PreemptState);
     6098    }
    60666099}
    60676100
     
    61146147    else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
    61156148    {
    6116         VMXGetActivateVMCS(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
     6149        VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
    61176150        pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
    61186151        pVCpu->hm.s.vmx.LastError.idEnteredCpu    = pVCpu->hm.s.idEnteredCpu;
     
    61516184    }
    61526185
     6186    /*
     6187     * Clear the X86_EFL_TF if necessary .
     6188     */
     6189    if (pVCpu->hm.s.fClearTrapFlag)
     6190    {
     6191        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
     6192        pMixedCtx->eflags.Bits.u1TF = 0;
     6193        pVCpu->hm.s.fClearTrapFlag = false;
     6194    }
     6195/** @todo there seems to be issues with the resume flag when the monitor trap
     6196 *        flag is pending without being used. Seen early in bios init when
     6197 *        accessing APIC page in prot mode. */
     6198
    61536199    /* On our way back from ring-3 the following needs to be done. */
    61546200    /** @todo This can change with preemption hooks. */
     
    61856231    VMMRZCallRing3Disable(pVCpu);
    61866232    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     6233
    61876234    Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
    61886235    hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
     6236
    61896237    VMMRZCallRing3Enable(pVCpu);
    61906238}
     
    67426790
    67436791    /* Load the active VMCS as the current one. */
    6744     int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
     6792    int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    67456793    if (RT_FAILURE(rc))
    67466794        return rc;
     
    67496797     *        as we're no preempted. */
    67506798    pVCpu->hm.s.fResumeVM = false;
     6799    pVCpu->hm.s.vmx.fVmxLeaveDone = false;
    67516800    return VINF_SUCCESS;
     6801}
     6802
     6803
     6804/**
     6805 * The thread-context callback (only on platforms which support it).
     6806 *
     6807 * @param   enmEvent        The thread-context event.
     6808 * @param   pVCpu           Pointer to the VMCPU.
     6809 * @param   fGlobalInit     Whether global VT-x/AMD-V init. was used.
     6810 * @thread EMT.
     6811 */
     6812VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
     6813{
     6814    switch (enmEvent)
     6815    {
     6816        case RTTHREADCTXEVENT_PREEMPTING:
     6817        {
     6818            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6819            Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));     /* Paranoia. */
     6820
     6821            PVM         pVM       = pVCpu->CTX_SUFF(pVM);
     6822            PCPUMCTX    pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
     6823            VMMRZCallRing3Disable(pVCpu);                        /* No longjmps (log-flush, locks) in this fragile context. */
     6824            hmR0VmxLeave(pVM, pVCpu, pMixedCtx);                 /* Save the guest-state, restore host-state (FPU, debug etc.). */
     6825
     6826            int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);   /* Flush VMCS CPU state to VMCS region in memory. */
     6827            AssertRC(rc); NOREF(rc);
     6828
     6829            rc = HMR0LeaveEx(pVCpu);                             /* Leave HM context, takes care of local init (term). */
     6830            AssertRC(rc); NOREF(rc);
     6831
     6832            VMMRZCallRing3Enable(pVCpu);                         /* Restore longjmp state. */
     6833            break;
     6834        }
     6835
     6836        case RTTHREADCTXEVENT_RESUMED:
     6837        {
     6838            /* Disable preemption, we don't want to be migrated to another CPU while re-initializing VT-x state. */
     6839            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     6840            RTThreadPreemptDisable(&PreemptState);
     6841
     6842            /* Initialize the bare minimum state required for HM. This takes care of
     6843               initializing VT-x if necessary (onlined CPUs, local init etc.) */
     6844            HMR0EnterEx(pVCpu);
     6845
     6846            /* Load the active VMCS as the current one. */
     6847            int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     6848            AssertRC(rc);
     6849
     6850            pVCpu->hm.s.fResumeVM = false;
     6851            pVCpu->hm.s.vmx.fVmxLeaveDone = false;
     6852            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
     6853
     6854            /* Restore preemption, migrating to another CPU should be fine now. */
     6855            RTThreadPreemptRestore(&PreemptState);
     6856            break;
     6857        }
     6858
     6859        default:
     6860            break;
     6861    }
    67526862}
    67536863
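
The ordering of the PREEMPTING leg above matters; briefly annotated (the reasoning is inferred from the hunks, not stated in the source):

    /* 1) hmR0VmxLeave()  - save guest state, restore the host FPU/debug
          registers and the host-state bits VT-x only restores partially;
       2) VMXClearVmcs()  - flush CPU-side VMCS state to the in-memory
          region and mark the launch state "clear", so that
          VMXActivateVmcs() on whichever CPU we resume on sees it whole;
       3) HMR0LeaveEx()   - leave HM context and, for local init, disable
          VT-x on the CPU being vacated. */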
     
    67646874{
    67656875    AssertPtr(pVCpu);
    6766     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    67676876    NOREF(pVM);
    67686877    NOREF(pCtx);
    67696878
    6770     /** @todo this will change with preemption hooks where we only VMCLEAR when
    6771      *        we are actually going to be preempted, not all the time like we
    6772      *        currently do. */
    6773 
    6774     /* Restore host-state bits that VT-x only restores partially. */
    6775     if (pVCpu->hm.s.vmx.fRestoreHostFlags)
    6776     {
    6777         VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
    6778         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
    6779     }
    6780 
    6781     /*
    6782      * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
    6783      * and mark the VMCS launch-state as "clear".
    6784      */
    6785     int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
    6786     return rc;
     6879    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     6880    {
     6881        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6882
     6883        /*
     6884         * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
     6885         * and mark the VMCS launch-state as "clear".
     6886         */
     6887        int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     6888        return rc;
     6889    }
     6890
     6891    /* With thread-context hooks, nothing to do here. It's taken care of in VMXR0ThreadCtxCallback(). */
     6892    return VINF_SUCCESS;
    67876893}
    67886894
     
    70307136     * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
    70317137     */
    7032     /** @todo Rework event evaluation and injection to be completely separate. */
     7138    /** @todo Rework event evaluation and injection to be completely separate.
     7139     *  Update: Tried it, problem with handling halts. Control never returns to VT-x
     7140     *        if we exit VT-x with external interrupt pending in a TRPM event. */
    70337141    if (TRPMHasTrap(pVCpu))
    70347142        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
     
    73947502        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    73957503    }
    7396 
    7397     /*
    7398      * Clear the X86_EFL_TF if necessary .
    7399      */
    7400     if (pVCpu->hm.s.fClearTrapFlag)
    7401     {
    7402         int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
    7403         AssertRCReturn(rc2, rc2);
    7404         pVCpu->hm.s.fClearTrapFlag = false;
    7405         pCtx->eflags.Bits.u1TF = 0;
    7406     }
    7407 /** @todo there seems to be issues with the resume flag when the monitor trap
    7408  *        flag is pending without being used. Seen early in bios init when
    7409  *        accessing APIC page in prot mode. */
    74107504
    74117505    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     
    76467740        AssertRCBreak(rc);
    76477741        if (   (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
    7648             && (u64Val & 0xfffffe3c))                           /* Bits 31-9, bits 2-5 MBZ. */
     7742            && (u64Val & 0xfffffe3c))                           /* Bits 31:9, bits 5:2 MBZ. */
    76497743        {
    76507744            HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
     
    77577851            AssertRCBreak(rc);
    77587852            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
    7759                               VMX_IGS_PERF_GLOBAL_MSR_RESERVED);        /* Bits 63-35, bits 31-2 MBZ. */
     7853                              VMX_IGS_PERF_GLOBAL_MSR_RESERVED);        /* Bits 63:35, bits 31:2 MBZ. */
    77607854        }
    77617855
     
    77927886            AssertRCBreak(rc);
    77937887            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
    7794                               VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63-12, bit 9, bits 7-1 MBZ. */
     7888                              VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
    77957889            HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
    77967890                              VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
     
    80148108        HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
    80158109        HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
    8016         HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED);   /* Bits 11-8 MBZ. */
     8110        HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED);   /* Bits 11:8 MBZ. */
    80178111        HMVMX_CHECK_BREAK(   (pCtx->tr.u32Limit & 0xfff) == 0xfff
    80188112                          || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
     
    81178211            rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
    81188212            AssertRCBreak(rc);
    8119             /* Bits 63-15, Bit 13, Bits 11-4 MBZ. */
     8213            /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
    81208214            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
    81218215            u32Val = u64Val;    /* For pending debug exceptions checks below. */
     
    81258219            rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
    81268220            AssertRCBreak(rc);
    8127             /* Bits 31-15, Bit 13, Bits 11-4 MBZ. */
     8221            /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
    81288222            HMVMX_CHECK_BREAK(!(u64Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
    81298223        }
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r47652 r47760  
    3131VMMR0DECL(int)  VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
    3232VMMR0DECL(int)  VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     33VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    3334VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem);
    3435VMMR0DECL(int)  VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     
    4445DECLASM(int)    VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    4546
     47
    4648# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4749DECLASM(int)    VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
     
    5052# endif
    5153
    52 /* Cached VMCS accesses -- defined always in the old VT-x code, defined only for 32 hosts on new code. */
    53 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
     54/* Cached VMCS accesses -- defined only for 32 hosts (with 64-bit guest support). */
     55# ifdef VMX_USE_CACHED_VMCS_ACCESSES
    5456VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);
    5557
     
    6062    return VINF_SUCCESS;
    6163}
    62 #endif
     64# endif
    6365
    6466# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r47645 r47760  
    462462{
    463463    RTThreadCtxHooksRelease(pVCpu->vmm.s.hR0ThreadCtx);
     464}
     465
     466
     467/**
     468 * Registers the thread-context hook for this VCPU.
     469 *
     470 * @param   pVCpu           Pointer to the VMCPU.
     471 * @param   pfnThreadHook   Pointer to the thread-context callback.
     472 * @returns VBox status code.
     473 *
     474 * @thread EMT.
     475 */
     476VMMR0DECL(int) VMMR0ThreadCtxHooksRegister(PVMCPU pVCpu, PFNRTTHREADCTXHOOK pfnThreadHook)
     477{
     478    return RTThreadCtxHooksRegister(pVCpu->vmm.s.hR0ThreadCtx, pfnThreadHook, pVCpu);
     479}
     480
     481
     482/**
     483 * Deregisters the thread-context hook for this VCPU.
     484 *
     485 * @returns VBox status code.
     486 * @param   pVCpu       Pointer to the VMCPU.
     487 * @thread EMT.
     488 */
     489VMMR0DECL(int) VMMR0ThreadCtxHooksDeregister(PVMCPU pVCpu)
     490{
     491    return RTThreadCtxHooksDeregister(pVCpu->vmm.s.hR0ThreadCtx);
    464492}
    465493
     
    830858            if (!HMR0SuspendPending())
    831859            {
     860                /** @todo VMMR0ThreadCtxHooks support. */
    832861                rc = HMR0Enter(pVM, pVCpu);
    833862                if (RT_SUCCESS(rc))
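
The hunk above is where the new hooks are meant to be consumed; this revision only leaves the todo in place. Under that assumption, a hypothetical future wiring (not part of r47760) might look like:

    /* Hypothetical: register HM's dispatcher before entering HM context and
       drop it when returning to ring-3. Names are from this changeset; the
       placement is an assumption based on the @todo above. */
    int rc = VMMR0ThreadCtxHooksRegister(pVCpu, HMR0ThreadCtxCallback);
    if (RT_SUCCESS(rc))
        rc = HMR0Enter(pVM, pVCpu);
    /* ... run guest code ... */
    VMMR0ThreadCtxHooksDeregister(pVCpu);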
  • trunk/src/VBox/VMM/include/HMInternal.h

    r47718 r47760  
    657657        /** Set if guest was executing in real mode (extra checks). */
    658658        bool                        fWasInRealMode;
     659        /** Whether we've completed the restoration procedure while leaving the inner
     660         *  VT-x context. */
     661        bool                        fVmxLeaveDone;
    659662    } vmx;
    660663
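The new fVmxLeaveDone flag closes a re-entrancy window: with thread-context hooks, hmR0VmxLeave() can be reached both from the PREEMPTING callback and from the ordinary longjmp/exit-to-ring-3 path. A sketch of the guard as it appears in the HMVMXR0.cpp hunks above:

    /* In hmR0VmxLeave(), when called with preemption enabled (hooks in
       use), skip the work if the preempt callback has already done it: */
    if (pVCpu->hm.s.vmx.fVmxLeaveDone)
    {
        RTThreadPreemptRestore(&PreemptState);
        return;
    }
    /* ... save guest state, restore host state ... */
    pVCpu->hm.s.vmx.fVmxLeaveDone = true;  /* cleared again in VMXR0Enter()
                                              and on RTTHREADCTXEVENT_RESUMED */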