VirtualBox

Changeset 19217 in vbox for trunk


Timestamp: Apr 27, 2009 3:00:59 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 46584
Message:

UVM split-up for SMP guests. Global and local request packets supported.

Location: trunk
Files: 17 edited

  • trunk/include/VBox/types.h

    r18101 r19217  
    9090/** Pointer to a ring-3 (user mode) VM structure. */
    9191typedef R3PTRTYPE(struct UVM *)     PUVM;
     92
     93/** Pointer to a ring-3 (user mode) VMCPU structure. */
     94typedef R3PTRTYPE(struct UVMCPU *)  PUVMCPU;
    9295
    9396/** Virtual CPU ID. */
  • trunk/include/VBox/uvm.h

    r19101 r19217  
    4343    /** Pointer to the UVM structure.  */
    4444    PUVM                            pUVM;
     45    /** Pointer to the VM structure.  */
     46    PVM                             pVM;
     47    /** Pointer to the VMCPU structure.  */
     48    PVMCPU                          pVCpu;
    4549    /** The virtual CPU ID.  */
    4650    RTCPUID                         idCpu;
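
    Together with the types.h hunk above, these additions give each virtual CPU a ring-3 companion object. A minimal sketch of the resulting linkage, reduced to the members named in the hunks (the declarations below are illustrative, not the real headers):

        #include <stdint.h>

        /* Illustrative forward declarations -- the real types live in VBox/types.h. */
        typedef struct UVM    *PUVM;     /* ring-3 (user mode) VM structure */
        typedef struct VM     *PVM;      /* shared VM structure */
        typedef struct VMCPU  *PVMCPU;   /* shared per-CPU structure */
        typedef struct UVMCPU *PUVMCPU;  /* ring-3 per-CPU structure (new here) */
        typedef uint32_t       RTCPUID;  /* virtual CPU ID (IPRT) */

        /* Reduced sketch of the new per-CPU linkage (see the hunk above). */
        struct UVMCPU
        {
            PUVM    pUVM;   /* the owning UVM */
            PVM     pVM;    /* NULL until the shared VM is created (see the VM.cpp hunk) */
            PVMCPU  pVCpu;  /* the matching shared VMCPU */
            RTCPUID idCpu;  /* this CPU's ID; also its index into UVM::aCpus[] */
        };

    The vm.h hunk below adds the reverse link, VMCPU::pUVCpu, so code holding either structure can reach the other.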
  • trunk/include/VBox/vm.h

    r19178 r19217  
    8686    VMCPUSTATE volatile     enmState;
    8787
     88    /** Pointer to the ring-3 UVMCPU structure. */
     89    PUVMCPU                 pUVCpu;
    8890    /** Ring-3 Host Context VM Pointer. */
    8991    PVMR3                   pVMR3;
     
    105107     *          following it (to grow into and align the struct size).
    106108     *   */
    107     uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 9 : 6];
     109    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];
    108110
    109111    /** CPUM part. */
     
    283285#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)
    284286
    285 /** Normal priority actions. */
     287/** Normal priority VM actions. */
    286288#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
     289/** Normal priority VMCPU actions. */
     290#define VMCPU_FF_NORMAL_PRIORITY_MASK           (VMCPU_FF_REQUEST)
    287291
    288292/** Flags to clear before resuming guest execution. */
     
    324328 * @param   fFlag   The flag to set.
    325329 */
    326 #if 1 //def VBOX_WITH_SMP_GUESTS
    327 # define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
    328 #else
    329 # define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, (fFlag))
    330 #endif
     330#define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
    331331
    332332/** @def VM_FF_CLEAR
     
    351351 * @param   fFlag   The flag to clear.
    352352 */
    353 #if 1 //def VBOX_WITH_SMP_GUESTS
    354 # define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
    355 #else
    356 # define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, ~(fFlag))
    357 #endif
     353#define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
    358354
    359355/** @def VM_FF_ISSET
     
    371367 * @param   fFlag   The flag to check.
    372368 */
    373 #if 1 //def VBOX_WITH_SMP_GUESTS
    374 # define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
    375 #else
    376 # define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
    377 #endif
     369#define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
    378370
    379371/** @def VM_FF_ISPENDING
     
    391383 * @param   fFlags  The flags to check for.
    392384 */
    393 #if 1 //def VBOX_WITH_SMP_GUESTS
    394 # define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))
    395 #else
    396 # define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags))
    397 #endif
     385#define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))
    398386
    399387/** @def VM_FF_ISPENDING
     
    415403 * @param   fExcpt  The flags that should not be set.
    416404 */
    417 #if 1 //def VBOX_WITH_SMP_GUESTS
    418 # define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
    419 #else
    420 # define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags)) && !((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fExcpt)) )
    421 #endif
     405#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
    422406
    423407/** @def VM_IS_EMT
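
    Net effect of the macro changes above: the "#if 1 //def VBOX_WITH_SMP_GUESTS" scaffolding is gone, and every VMCPU_FF_* operation now targets the per-CPU fLocalForcedActions word instead of the shared fGlobalForcedActions. A hedged usage sketch -- the macros, VMCPU_FF_REQUEST and VMCPU_FF_NORMAL_PRIORITY_MASK are from the hunks above and VMR3NotifyCpuFF from the vmapi.h hunk below, but the function itself is made up:

        /* Illustrative: flag per-CPU work and wake only that CPU's EMT. */
        static void exampleKickVCpu(PVMCPU pVCpu)
        {
            /* Atomically ORs the flag into pVCpu->fLocalForcedActions. */
            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);

            /* Non-zero if any of the masked per-CPU flags are pending. */
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
                VMR3NotifyCpuFF(pVCpu, false /* fNotifiedREM */);
        }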
  • trunk/include/VBox/vm.mac

    r19141 r19217  
    107107    .fLocalForcedActions resd 1
    108108    .enmState            resd 1
     109    .pUVCpu              RTR3PTR_RES 1
    109110    .pVMR3               RTR3PTR_RES 1
    110111    .pVMR0               RTR0PTR_RES 1
  • trunk/include/VBox/vmapi.h

    r19173 r19217  
    434434VMMR3DECL(int)  VMR3ReqWait(PVMREQ pReq, unsigned cMillies);
    435435VMMR3DECL(int)  VMR3ReqProcessU(PUVM pUVM, VMREQDEST enmDest);
    436 VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM);
    437 VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM);
     436VMMR3DECL(void) VMR3NotifyGlobalFF(PVM pVM, bool fNotifiedREM);
     437VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, bool fNotifiedREM);
     438VMMR3DECL(void) VMR3NotifyCpuFF(PVMCPU pVCpu, bool fNotifiedREM);
     439VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVMCpu, bool fNotifiedREM);
    438440VMMR3DECL(int)  VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts);
    439 VMMR3DECL(int)  VMR3WaitU(PUVM pUVM);
     441VMMR3DECL(int)  VMR3WaitU(PUVMCPU pUVMCpu);
    440442VMMR3DECL(RTCPUID)          VMR3GetVMCPUId(PVM pVM);
    441443VMMR3DECL(RTTHREAD)         VMR3GetVMCPUThread(PVM pVM);
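
    The old VMR3NotifyFF()/VMR3NotifyFFU() pair is split into global and per-CPU variants, and VMR3WaitU() now waits on a single virtual CPU. The call-site hunks below (DBGF.cpp, PDMDevHlp.cpp, PDMDevMiscHlp.cpp, TM.cpp) show which variant goes where; condensed into a hedged sketch (the flags and signatures are from this changeset, the wrapper function is illustrative):

        /* Illustrative: choosing between the global and per-CPU notifiers. */
        static void exampleRaiseEvents(PVM pVM, PVMCPU pVCpu)
        {
            /* VM-wide condition (e.g. a timer FF): all EMTs may need to
             * react, so every one of them is woken -- mirrors the TM.cpp hunk. */
            VM_FF_SET(pVM, VM_FF_TIMER);
            VMR3NotifyGlobalFF(pVM, true /* fNotifiedREM */);

            /* CPU-specific condition (e.g. an APIC interrupt): only the target
             * CPU's EMT is woken -- mirrors the PDMDevMiscHlp.cpp hunks. */
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
            VMR3NotifyCpuFF(pVCpu, true /* fNotifiedREM */);
        }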
  • trunk/src/VBox/VMM/DBGF.cpp

    r19141 r19217  
    123123        rc = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pVM->dbgf.s.enmVMMCmd, enmCmd);
    124124        VM_FF_SET(pVM, VM_FF_DBGF);
    125         VMR3NotifyFF(pVM, false /* didn't notify REM */);
     125        VMR3NotifyGlobalFF(pVM, false /* didn't notify REM */);
    126126    }
    127127    return rc;
  • trunk/src/VBox/VMM/EM.cpp

    r19151 r19217  
    33613361
    33623362    /*
     3363     * Normal priority then. (per-VCPU)
     3364     * (Executed in no particular order.)
     3365     */
     3366    if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
     3367        &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
     3368    {
     3369        /*
     3370         * Requests from other threads.
     3371         */
     3372        if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
     3373        {
     3374            rc2 = VMR3ReqProcessU(pVM->pUVM, (VMREQDEST)pVCpu->idCpu);
     3375            if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
     3376            {
     3377                Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
     3378                STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
     3379                return rc2;
     3380            }
     3381            UPDATE_RC();
     3382        }
     3383
     3384        /* check that we got them all  */
     3385        Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
     3386    }
     3387
     3388    /*
    33633389     * High priority pre execution chunk last.
    33643390     * (Executed in ascending priority order.)
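
    The new block above services per-VCPU requests: VMR3ReqProcessU(pVM->pUVM, (VMREQDEST)pVCpu->idCpu) drains only the queue belonging to this CPU, with the CPU ID doubling as the request destination. The producer side, sketched under the same convention -- VMR3ReqCallU's shape follows the VM.cpp hunk below, while the worker, the cast and the VMREQ field access are illustrative assumptions:

        /* Illustrative: run a function on one specific VCPU's EMT. */
        static DECLCALLBACK(int) exampleWorker(PVM pVM)
        {
            /* Executes on the target EMT once it services VMCPU_FF_REQUEST. */
            return VINF_SUCCESS;
        }

        static int exampleCallOnCpu(PUVM pUVM, RTCPUID idCpu, PVM pVM)
        {
            PVMREQ pReq;
            int rc = VMR3ReqCallU(pUVM, (VMREQDEST)idCpu /* target that CPU's queue */,
                                  &pReq, RT_INDEFINITE_WAIT, 0,
                                  (PFNRT)exampleWorker, 1, pVM);
            if (RT_SUCCESS(rc))
            {
                rc = pReq->iStatus;  /* assumed: the worker's own status lives here */
                VMR3ReqFree(pReq);
            }
            return rc;
        }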
  • trunk/src/VBox/VMM/PDMDevHlp.cpp

    r19076 r19217  
    24802480    VM_FF_SET(pVM, VM_FF_PDM_DMA);
    24812481    REMR3NotifyDmaPending(pVM);
    2482     VMR3NotifyFF(pVM, true);
     2482    VMR3NotifyGlobalFF(pVM, true);
    24832483}
    24842484
  • trunk/src/VBox/VMM/PDMDevMiscHlp.cpp

    r19141 r19217  
    5555    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
    5656    REMR3NotifyInterruptSet(pVM, pVCpu);
    57     VMR3NotifyFF(pVM, true); /** @todo SMP: notify the right cpu. */
     57    VMR3NotifyCpuFF(pVCpu, true);
    5858}
    5959
     
    158158    VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
    159159    REMR3NotifyInterruptSet(pVM, pVCpu);
    160     VMR3NotifyFF(pVM, true);  /** @todo SMP: notify the right cpu. */
     160    VMR3NotifyCpuFF(pVCpu, true);
    161161}
    162162
  • trunk/src/VBox/VMM/TM.cpp

    r19032 r19217  
    14801480        VM_FF_SET(pVM, VM_FF_TIMER);
    14811481        REMR3NotifyTimerPending(pVM);
    1482         VMR3NotifyFF(pVM, true);
     1482        VMR3NotifyGlobalFF(pVM, true);
    14831483        STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
    14841484    }
  • trunk/src/VBox/VMM/VM.cpp

    r19141 r19217  
    243243             */
    244244            PVMREQ pReq;
    245             /** @todo SMP: VMREQDEST_ANY -> VMREQDEST_CPU0 */
    246             rc = VMR3ReqCallU(pUVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU,
     245            rc = VMR3ReqCallU(pUVM, VMREQDEST_ANY /* can't use CPU0 here as it's too early (pVM==0) */, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU,
    247246                              4, pUVM, cCPUs, pfnCFGMConstructor, pvUserCFGM);
    248247            if (RT_SUCCESS(rc))
     
    400399
    401400    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
    402     AssertRelease(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
    403401
    404402    pUVM->vm.s.ppAtResetNext = &pUVM->vm.s.pAtReset;
     
    406404    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    407405    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
     406
    408407    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    409408
     
    411410    for (i = 0; i < cCPUs; i++)
    412411    {
    413         pUVM->aCpus[i].pUVM  = pUVM;
    414         pUVM->aCpus[i].idCpu = i;
     412        pUVM->aCpus[i].pUVM   = pUVM;
     413        pUVM->aCpus[i].idCpu  = i;
    415414    }
    416415
     
    420419    if (RT_SUCCESS(rc))
    421420    {
    422         rc = RTSemEventCreate(&pUVM->vm.s.EventSemWait);
     421        /* Allocate a halt method event semaphore for each VCPU. */
     422        for (i = 0; i < cCPUs; i++)
     423        {
     424            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
     425            if (RT_FAILURE(rc))
     426                break;
     427        }
     428
    423429        if (RT_SUCCESS(rc))
    424430        {
     
    465471                STAMR3TermUVM(pUVM);
    466472            }
    467             RTSemEventDestroy(pUVM->vm.s.EventSemWait);
     473            for (i = 0; i < cCPUs; i++)
     474            {
     475                RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
     476            }
    468477        }
    469478        RTTlsFree(pUVM->vm.s.idxTLS);
     
    526535        for (uint32_t i = 0; i < pVM->cCPUs; i++)
    527536        {
     537            pVM->aCpus[i].pUVCpu        = &pUVM->aCpus[i];
    528538            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
    529539            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
     540
     541            pUVM->aCpus[i].pVM          = pVM;
     542            pUVM->aCpus[i].pVCpu        = &pVM->aCpus[i];
    530543        }
    531544
     
    686699        STAM_REG(pVM, &pVM->StatSwitcherRstrRegs,   STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
    687700
    688         STAM_REL_REG(pVM, &pUVM->vm.s.StatHaltYield, STAMTYPE_PROFILE,     "/PROF/VM/Halt/Yield",    STAMUNIT_TICKS_PER_CALL,    "Profiling halted state yielding.");
    689         STAM_REL_REG(pVM, &pUVM->vm.s.StatHaltBlock, STAMTYPE_PROFILE,     "/PROF/VM/Halt/Block",    STAMUNIT_TICKS_PER_CALL,    "Profiling halted state blocking.");
    690         STAM_REL_REG(pVM, &pUVM->vm.s.StatHaltTimers,STAMTYPE_PROFILE,     "/PROF/VM/Halt/Timers",   STAMUNIT_TICKS_PER_CALL,    "Profiling halted state timer tasks.");
     701        for (unsigned iCpu=0;iCpu<pVM->cCPUs;iCpu++)
     702        {
     703            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltYield,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", iCpu);
     704            AssertRC(rc);
     705            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltBlock,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", iCpu);
     706            AssertRC(rc);
     707            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", iCpu);
     708            AssertRC(rc);
     709        }
    691710
    692711        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew,   STAMTYPE_COUNTER,     "/VM/Req/AllocNew",       STAMUNIT_OCCURENCES,        "Number of VMR3ReqAlloc returning a new packet.");
     
    17701789        if (pUVM->pVM)
    17711790            VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
    1772         VMR3NotifyFFU(pUVM, true /* fNotifiedREM */);
    1773         if (pUVM->aCpus[i].vm.s.EventSemWait != NIL_RTSEMEVENT) /** @todo remove test when we start initializing it! */
    1774             RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
    1775     }
    1776     RTSemEventSignal(pUVM->vm.s.EventSemWait);
     1791        VMR3NotifyGlobalFFU(pUVM, true /* fNotifiedREM */);
     1792        RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
     1793    }
    17771794
    17781795    /* Wait for them. */
     
    18021819    /* Cleanup the semaphores. */
    18031820    for (VMCPUID i = 0; i < pUVM->cCpus; i++)
    1804         if (pUVM->aCpus[i].vm.s.EventSemWait != NIL_RTSEMEVENT) /** @todo remove test when we start initializing it! */
    1805         {
    1806             RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
    1807             pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
    1808         }
    1809     RTSemEventDestroy(pUVM->vm.s.EventSemWait);
    1810     pUVM->vm.s.EventSemWait = NIL_RTSEMEVENT;
     1821    {
     1822        RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
     1823        pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
     1824    }
    18111825
    18121826    /*
     
    32753289VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
    32763290{
    3277     PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    3278 
    3279     AssertMsg(pUVMCPU, ("RTTlsGet %d failed!\n", pVM->pUVM->vm.s.idxTLS));
    3280     return pUVMCPU->idCpu;
     3291    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
     3292
     3293    AssertMsg(pUVCpu, ("RTTlsGet %d failed!\n", pVM->pUVM->vm.s.idxTLS));
     3294    return pUVCpu->idCpu;
    32813295}
    32823296
     
    32913305VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
    32923306{
    3293     PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    3294 
    3295     if (!pUVMCPU)
     3307    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
     3308
     3309    if (!pUVCpu)
    32963310        return NIL_RTNATIVETHREAD;
    32973311
    3298     return pUVMCPU->vm.s.NativeThreadEMT;
     3312    return pUVCpu->vm.s.NativeThreadEMT;
    32993313}
    33003314
     
    33093323VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
    33103324{
    3311     PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
    3312 
    3313     if (!pUVMCPU)
     3325    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
     3326
     3327    if (!pUVCpu)
    33143328        return NIL_RTNATIVETHREAD;
    33153329
    3316     return pUVMCPU->vm.s.NativeThreadEMT;
     3330    return pUVCpu->vm.s.NativeThreadEMT;
    33173331}
    33183332
     
    33273341VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
    33283342{
    3329     PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    3330 
    3331     if (!pUVMCPU)
     3343    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
     3344
     3345    if (!pUVCpu)
    33323346        return NIL_RTTHREAD;
    33333347
    3334     return pUVMCPU->vm.s.ThreadEMT;
     3348    return pUVCpu->vm.s.ThreadEMT;
    33353349}
    33363350
     
    33453359VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
    33463360{
    3347     PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
    3348 
    3349     if (!pUVMCPU)
     3361    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
     3362
     3363    if (!pUVCpu)
    33503364        return NIL_RTTHREAD;
    33513365
    3352     return pUVMCPU->vm.s.ThreadEMT;
    3353 }
    3354 
     3366    return pUVCpu->vm.s.ThreadEMT;
     3367}
     3368
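
    All of the VMR3GetVMCPU* accessors above share one pattern: at startup each EMT stores its PUVMCPU in thread-local storage (the RTTlsSet call in the VMEmt.cpp hunk below), so "which CPU am I?" is a single RTTlsGet away. Condensed as a hedged sketch -- idxTLS and the IPRT calls are from the hunks, the wrapper itself is illustrative:

        /* Illustrative: recover the calling EMT's per-CPU structure from TLS.
         * Returns NULL when called on a thread that is not an EMT. */
        static PUVMCPU exampleGetCurrentUVCpu(PUVM pUVM)
        {
            return (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
        }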
  • trunk/src/VBox/VMM/VMEmt.cpp

    r19141 r19217  
    5454DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
    5555{
    56     PUVMCPU pUVMCPU = (PUVMCPU)pvArgs;
    57     PUVM    pUVM    = pUVMCPU->pUVM;
    58     RTCPUID idCpu   = pUVMCPU->idCpu;
     56    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
     57    PUVM    pUVM    = pUVCpu->pUVM;
     58    RTCPUID idCpu   = pUVCpu->idCpu;
    5959    int     rc;
    6060
     
    6262                     ("Invalid arguments to the emulation thread!\n"));
    6363
    64     rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVMCPU);
     64    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    6565    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
    6666
     
    7474    {
    7575        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
    76         if (setjmp(pUVMCPU->vm.s.emtJumpEnv) != 0)
     76        if (setjmp(pUVCpu->vm.s.emtJumpEnv) != 0)
    7777        {
    7878            rc = VINF_SUCCESS;
     
    9494                break;
    9595            }
     96
    9697            if (pUVM->vm.s.pReqs)
    9798            {
     
    103104            }
    104105            else
     106            if (pUVCpu->vm.s.pReqs)
     107            {
     108                /*
     109                 * Service execute in EMT request.
     110                 */
     111                rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVCpu->idCpu);
     112                Log(("vmR3EmulationThread: Req (cpu=%d) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
     113            }
     114            else
    105115            {
    106116                /*
    107117                 * Nothing important is pending, so wait for something.
    108118                 */
    109                 rc = VMR3WaitU(pUVM);
     119                rc = VMR3WaitU(pUVCpu);
    110120                if (RT_FAILURE(rc))
    111121                    break;
     
    137147                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
    138148            }
     149            else if (pUVCpu->vm.s.pReqs)
     150            {
     151                /*
     152                 * Service execute in EMT request.
     153                 */
     154                rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVCpu->idCpu);
     155                Log(("vmR3EmulationThread: Req (cpu=%d)rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
     156            }
    139157            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
    140158            {
     
    159177                 * Nothing important is pending, so wait for something.
    160178                 */
    161                 rc = VMR3WaitU(pUVM);
     179                rc = VMR3WaitU(pUVCpu);
    162180                if (RT_FAILURE(rc))
    163181                    break;
     
    212230        vmR3DestroyFinalBitFromEMT(pUVM);
    213231
    214         pUVMCPU->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
     232        pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    215233    }
    216234    Log(("vmR3EmulationThread: EMT is terminated.\n"));
     
    231249     * The request loop.
    232250     */
    233     PUVMCPU pUVMCPU;
     251    PUVMCPU pUVCpu;
    234252    PUVM    pUVM = pVM->pUVM;
    235253    VMSTATE enmBefore;
    236254    int     rc;
    237255
    238     pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
    239     AssertReturn(pUVMCPU, VERR_INTERNAL_ERROR);
     256    pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
     257    AssertReturn(pUVCpu, VERR_INTERNAL_ERROR);
    240258
    241259    for (;;)
     
    262280            rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
    263281            Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
     282        }
     283        else if (pUVCpu->vm.s.pReqs)
     284        {
     285            /*
     286             * Service execute in EMT request.
     287             */
     288            rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVCpu->idCpu);
     289            Log(("vmR3EmulationThread: Req (cpu=%d)rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
    264290        }
    265291        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
     
    285311             * Nothing important is pending, so wait for something.
    286312             */
    287             rc = VMR3WaitU(pUVM);
     313            rc = VMR3WaitU(pUVCpu);
    288314            if (RT_FAILURE(rc))
    289315                break;
     
    314340
    315341    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    316     longjmp(pUVMCPU->vm.s.emtJumpEnv, 1);
     342    longjmp(pUVCpu->vm.s.emtJumpEnv, 1);
    317343}
    318344
     
    341367/**
    342368 * The old halt loop.
    343  *
    344  * @param   pUVM            Pointer to the user mode VM structure.
    345  */
    346 static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
     369 */
     370static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
    347371{
    348372    /*
    349373     * Halt loop.
    350374     */
    351     PVM pVM = pUVM->pVM;
     375    PVM    pVM   = pUVCpu->pVM;
     376    PVMCPU pVCpu = pUVCpu->pVCpu;
     377
    352378    int rc = VINF_SUCCESS;
    353     ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
     379    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    354380    //unsigned cLoops = 0;
    355381    for (;;)
     
    360386         * addition to perhaps set an FF.
    361387         */
    362         STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
     388        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
    363389        TMR3TimerQueuesDo(pVM);
    364         STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
     390        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
    365391        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
    366392            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     
    387413            {
    388414                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
    389                 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
     415                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, a);
    390416                RTThreadYield(); /* this is the best we can do here */
    391                 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
     417                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, a);
    392418            }
    393419            else if (u64NanoTS < 2000000)
    394420            {
    395421                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
    396                 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
    397                 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
    398                 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
     422                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
     423                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
     424                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
    399425            }
    400426            else
    401427            {
    402428                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
    403                 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
    404                 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
    405                 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
     429                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
     430                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
     431                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
    406432            }
    407433            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
     
    414440            AssertRC(rc != VERR_INTERRUPTED);
    415441            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
    416             ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
     442            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
    417443            VM_FF_SET(pVM, VM_FF_TERMINATE);
    418444            rc = VERR_INTERNAL_ERROR;
     
    421447    }
    422448
    423     ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
     449    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    424450    return rc;
    425451}
     
    500526 * the lag has been eliminated.
    501527 */
    502 static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now)
    503 {
    504     PVM pVM = pUVM->pVM;
     528static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
     529{
     530    PUVM    pUVM    = pUVCpu->pUVM;
     531    PVMCPU  pVCpu   = pUVCpu->pVCpu;
     532    PVM     pVM     = pUVCpu->pVM;
    505533
    506534    /*
     
    515543    if (u32CatchUpPct /* non-zero if catching up */)
    516544    {
    517         if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
     545        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    518546        {
    519547            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
     
    521549            {
    522550                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
    523                 fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
     551                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
    524552                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
    525553                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
     
    528556            else
    529557            {
    530                 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
    531                 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
     558                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
     559                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    532560            }
    533561        }
     
    536564            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
    537565            if (fSpinning)
    538                 pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
    539         }
    540     }
    541     else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
    542     {
    543         //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
    544         pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
     566                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
     567        }
     568    }
     569    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
     570    {
     571        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
     572        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    545573    }
    546574
     
    549577     */
    550578    int rc = VINF_SUCCESS;
    551     ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
     579    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    552580    unsigned cLoops = 0;
    553581    for (;; cLoops++)
     
    556584         * Work the timers and check if we can exit.
    557585         */
    558         STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
     586        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
    559587        TMR3TimerQueuesDo(pVM);
    560         STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
     588        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
    561589        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
    562590            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     
    582610#endif
    583611        {
    584             const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
     612            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
    585613            VMMR3YieldStop(pVM);
    586614
    587615            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
    588             if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
     616            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
    589617                cMilliSecs = 1;
    590618            else
    591                 cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
     619                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
    592620            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
    593             STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
    594             rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
    595             STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
     621            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
     622            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
     623            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
    596624            if (rc == VERR_TIMEOUT)
    597625                rc = VINF_SUCCESS;
     
    600628                AssertRC(rc != VERR_INTERRUPTED);
    601629                AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
    602                 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
     630                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
    603631                VM_FF_SET(pVM, VM_FF_TERMINATE);
    604632                rc = VERR_INTERNAL_ERROR;
     
    611639             */
    612640            const uint64_t Elapsed = RTTimeNanoTS() - Start;
    613             pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
     641            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
    614642            if (Elapsed > u64NanoTS)
    615                 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
    616             pUVM->vm.s.Halt.Method12.cBlocks++;
    617             if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
    618             {
    619                 pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
    620                 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
     643                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
     644            pUVCpu->vm.s.Halt.Method12.cBlocks++;
     645            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
     646            {
     647                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
     648                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
    621649                {
    622                     pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
    623                     pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
     650                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
     651                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
    624652                }
    625653            }
     
    636664    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
    637665
    638     ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
     666    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    639667    return rc;
    640668}
     
    657685 * try take care of the global scheduling of EMT threads.
    658686 */
    659 static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now)
    660 {
    661     PVM pVM = pUVM->pVM;
     687static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
     688{
     689    PUVM    pUVM  = pUVCpu->pUVM;
     690    PVMCPU  pVCpu = pUVCpu->pVCpu;
     691    PVM     pVM   = pUVCpu->pVM;
    662692
    663693    /*
     
    665695     */
    666696    int rc = VINF_SUCCESS;
    667     ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
     697    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    668698    unsigned cLoops = 0;
    669699    for (;; cLoops++)
     
    672702         * Work the timers and check if we can exit.
    673703         */
    674         STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
     704        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
    675705        TMR3TimerQueuesDo(pVM);
    676         STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
     706        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
    677707        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
    678708            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
     
    699729
    700730            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
    701             STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
     731            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, c);
    702732            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
    703             STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
     733            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, c);
    704734            if (rc == VERR_INTERRUPTED)
    705735                rc = VINF_SUCCESS;
     
    707737            {
    708738                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
    709                 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
     739                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
    710740                VM_FF_SET(pVM, VM_FF_TERMINATE);
    711741                rc = VERR_INTERNAL_ERROR;
     
    719749        else if (!(cLoops & 0x1fff))
    720750        {
    721             STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
     751            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, d);
    722752            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
    723             STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
     753            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, d);
    724754        }
    725755    }
    726756    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
    727757
    728     ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
     758    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    729759    return rc;
    730760}
     
    735765 *
    736766 * @returns VBox status code.
    737  * @param   pUVM            Pointer to the user mode VM structure.
    738  */
    739 static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
    740 {
    741     ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    742 
    743     PVM    pVM   = pUVM->pVM;
     767 * @param   pUVCpu            Pointer to the user mode VMCPU structure.
     768 */
     769static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
     770{
     771    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
     772
     773    PVM    pVM   = pUVCpu->pUVM->pVM;
    744774    PVMCPU pVCpu = VMMGetCpu(pVM);
    745775
     
    764794        {
    765795            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
    766             ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
     796            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
    767797            VM_FF_SET(pVM, VM_FF_TERMINATE);
    768798            rc = VERR_INTERNAL_ERROR;
     
    772802    }
    773803
    774     ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
     804    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    775805    return rc;
    776806}
     
    780810 * The global 1 halt method - VMR3NotifyFF() worker.
    781811 *
    782  * @param   pUVM            Pointer to the user mode VM structure.
     812 * @param   pUVCpu         Pointer to the user mode VMCPU structure.
    783813 * @param   fNotifiedREM    See VMR3NotifyFF().
    784814 */
    785 static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
    786 {
    787     if (pUVM->vm.s.fWait)
    788     {
    789         int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
     815static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
     816{
     817    if (pUVCpu->vm.s.fWait)
     818    {
     819        int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
    790820        AssertRC(rc);
    791821    }
    792822    else if (!fNotifiedREM)
    793         REMR3NotifyFF(pUVM->pVM);
     823        REMR3NotifyFF(pUVCpu->pVM);
    794824}
    795825
     
    799829 *
    800830 * @returns VBox status code.
    801  * @param   pUVM            Pointer to the user mode VM structure.
    802  */
    803 static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
    804 {
    805     ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
     831 * @param   pUVMCPU            Pointer to the user mode VMCPU structure.
     832 */
     833static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
     834{
     835    PUVM pUVM = pUVCpu->pUVM;
     836
     837    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    806838
    807839    int rc = VINF_SUCCESS;
     
    811843         * Check Relevant FFs.
    812844         */
    813         if (pUVM->vm.s.pReqs)
    814             break;
    815         if (    pUVM->pVM
    816             &&  (   VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
    817                  || VMCPU_FF_ISPENDING(VMMGetCpu(pUVM->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
     845        if (pUVM->vm.s.pReqs)   /* global requests pending? */
     846            break;
     847        if (pUVCpu->vm.s.pReqs) /* local requests pending? */
     848            break;
     849
     850        if (    pUVCpu->pVM
     851            &&  (   VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
     852                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
    818853                )
    819854            )
    820855            break;
    821         if (pUVM->vm.s.fTerminateEMT)
     856        if (pUVCpu->vm.s.fTerminateEMT)
    822857            break;
    823858
     
    826861         * anything needs our attention.
    827862         */
    828         rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
     863        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
    829864        if (rc == VERR_TIMEOUT)
    830865            rc = VINF_SUCCESS;
     
    832867        {
    833868            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
    834             ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
    835             if (pUVM->pVM)
    836                 VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
     869            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
     870            if (pUVCpu->pVM)
     871                VM_FF_SET(pUVCpu->pVM, VM_FF_TERMINATE);
    837872            rc = VERR_INTERNAL_ERROR;
    838873            break;
     
    841876    }
    842877
    843     ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
     878    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    844879    return rc;
    845880}
     
    849884 * Bootstrap VMR3NotifyFF() worker.
    850885 *
    851  * @param   pUVM            Pointer to the user mode VM structure.
     886 * @param   pUVCpu         Pointer to the user mode VMCPU structure.
    852887 * @param   fNotifiedREM    See VMR3NotifyFF().
    853888 */
    854 static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
    855 {
    856     if (pUVM->vm.s.fWait)
    857     {
    858         int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
     889static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
     890{
     891    if (pUVCpu->vm.s.fWait)
     892    {
     893        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
    859894        AssertRC(rc);
    860895    }
     
    866901 *
    867902 * @returns VBox status code.
    868  * @param   pUVM            Pointer to the user mode VM structure.
    869  */
    870 static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
    871 {
    872     ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    873 
    874     PVM    pVM   = pUVM->pVM;
    875     PVMCPU pVCpu = VMMGetCpu(pVM);
     903 * @param   pUVMCPU            Pointer to the user mode VMCPU structure.
     904 */
     905static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
     906{
     907    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
     908
     909    PVM    pVM   = pUVCpu->pVM;
     910    PVMCPU pVCpu = pUVCpu->pVCpu;
    876911    int    rc    = VINF_SUCCESS;
    877912    for (;;)
     
    888923         * anything needs our attention.
    889924         */
    890         rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
     925        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
    891926        if (rc == VERR_TIMEOUT)
    892927            rc = VINF_SUCCESS;
     
    894929        {
    895930            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
    896             ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
     931            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
    897932            VM_FF_SET(pVM, VM_FF_TERMINATE);
    898933            rc = VERR_INTERNAL_ERROR;
     
    902937    }
    903938
    904     ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
     939    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    905940    return rc;
    906941}
     
    910945 * Default VMR3NotifyFF() worker.
    911946 *
    912  * @param   pUVM            Pointer to the user mode VM structure.
     947 * @param   pUVCpu         Pointer to the user mode VMCPU structure.
    913948 * @param   fNotifiedREM    See VMR3NotifyFF().
    914949 */
    915 static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
    916 {
    917     if (pUVM->vm.s.fWait)
    918     {
    919         int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
     950static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
     951{
     952    if (pUVCpu->vm.s.fWait)
     953    {
     954        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
    920955        AssertRC(rc);
    921956    }
    922957    else if (!fNotifiedREM)
    923         REMR3NotifyFF(pUVM->pVM);
     958        REMR3NotifyFF(pUVCpu->pVM);
    924959}
    925960
     
    938973    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    939974    /** The halt function. */
    940     DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now));
     975    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
    941976    /** The wait function. */
    942     DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVM pUVM));
     977    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
    943978    /** The notifyFF function. */
    944     DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
     979    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVMCPU pUVCpu, bool fNotifiedREM));
    945980} g_aHaltMethods[] =
    946981{
     
    948983    { VMHALTMETHOD_OLD,     NULL,                   NULL,                   vmR3HaltOldDoHalt,      vmR3DefaultWait,        vmR3DefaultNotifyFF },
    949984    { VMHALTMETHOD_1,       vmR3HaltMethod1Init,    NULL,                   vmR3HaltMethod1Halt,    vmR3DefaultWait,        vmR3DefaultNotifyFF },
    950   //{ VMHALTMETHOD_2,       vmR3HaltMethod2Init,    vmR3HaltMethod2Term,    vmR3HaltMethod2DoHalt,  vmR3HaltMethod2Wait,    vmR3HaltMethod2NotifyFF },
    951985    { VMHALTMETHOD_GLOBAL_1,vmR3HaltGlobal1Init,    NULL,                   vmR3HaltGlobal1Halt,    vmR3HaltGlobal1Wait,    vmR3HaltGlobal1NotifyFF },
    952986};
     
    960994 *
    961995 * @param   pVM             VM handle.
     996 * @param   pVCpu           VMCPU handle (NULL if all/global notification)
    962997 * @param   fNotifiedREM    Set if REM have already been notified. If clear the
    963998 *                          generic REMR3NotifyFF() method is called.
    964999 */
    965 VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
    966 {
     1000VMMR3DECL(void) VMR3NotifyGlobalFF(PVM pVM, bool fNotifiedREM)
     1001{
     1002    PUVM pUVM = pVM->pUVM;
     1003
    9671004    LogFlow(("VMR3NotifyFF:\n"));
    968     PUVM pUVM = pVM->pUVM;
    969     g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
     1005    /** @todo might want to have a 2nd look at this (SMP) */
     1006    for (unsigned iCpu=0;iCpu<pVM->cCPUs;iCpu++)
     1007    {
     1008        PUVMCPU pUVCpu = pVM->aCpus[iCpu].pUVCpu;
     1009        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
     1010    }
     1011}
     1012
     1013/**
     1014 * Notify the emulation thread (EMT) about pending Forced Action (FF).
     1015 *
     1016 * This function is called by thread other than EMT to make
     1017 * sure EMT wakes up and promptly service an FF request.
     1018 *
     1019 * @param   pVM             VM handle.
     1020 * @param   pVCpu           VMCPU handle (NULL if all/global notification)
     1021 * @param   fNotifiedREM    Set if REM have already been notified. If clear the
     1022 *                          generic REMR3NotifyFF() method is called.
     1023 */
     1024VMMR3DECL(void) VMR3NotifyCpuFF(PVMCPU pVCpu, bool fNotifiedREM)
     1025{
     1026    PUVMCPU pUVCpu = pVCpu->pUVCpu;
     1027    PUVM    pUVM    = pUVCpu->pUVM;
     1028
     1029    LogFlow(("VMR3NotifyCpuFF:\n"));
     1030    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
    9701031}
    9711032
     
    9811042 *                          generic REMR3NotifyFF() method is called.
    9821043 */
    983 VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
    984 {
    985     LogFlow(("VMR3NotifyFF:\n"));
    986     g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
     1044VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, bool fNotifiedREM)
     1045{
     1046    LogFlow(("VMR3NotifyGlobalFFU:\n"));
     1047    /** @todo might want to have a 2nd look at this (SMP) */
     1048    for (unsigned iCpu=0;iCpu<pUVM->cCpus;iCpu++)
     1049    {
     1050        PUVMCPU pUVCpu = &pUVM->aCpus[iCpu];
     1051        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
     1052    }
     1053}
     1054
     1055/**
     1056 * Notify the emulation thread (EMT) about pending Forced Action (FF).
     1057 *
     1058 * This function is called by thread other than EMT to make
     1059 * sure EMT wakes up and promptly service an FF request.
     1060 *
     1061 * @param   pUVM            Pointer to the user mode VM structure.
     1062 * @param   fNotifiedREM    Set if REM have already been notified. If clear the
     1063 *                          generic REMR3NotifyFF() method is called.
     1064 */
     1065VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, bool fNotifiedREM)
     1066{
     1067    PUVM pUVM = pUVCpu->pUVM;
     1068
     1069    LogFlow(("VMR3NotifyCpuFFU:\n"));
     1070    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
    9871071}
    9881072
     
    10261110     * Record halt averages for the last second.
    10271111     */
    1028     PUVM pUVM = pVM->pUVM;
     1112    PUVMCPU pUVCpu = pVCpu->pUVCpu;   
    10291113    uint64_t u64Now = RTTimeNanoTS();
    1030     int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
     1114    int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    10311115    if (off > 1000000000)
    10321116    {
    1033         if (off > _4G || !pUVM->vm.s.cHalts)
    1034         {
    1035             pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
    1036             pUVM->vm.s.HaltFrequency = 1;
     1117        if (off > _4G || !pUVCpu->vm.s.cHalts)
     1118        {
     1119            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
     1120            pUVCpu->vm.s.HaltFrequency = 1;
    10371121        }
    10381122        else
    10391123        {
    1040             pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
    1041             pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
    1042         }
    1043         pUVM->vm.s.u64HaltsStartTS = u64Now;
    1044         pUVM->vm.s.cHalts = 0;
    1045     }
    1046     pUVM->vm.s.cHalts++;
     1124            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
     1125            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
     1126        }
     1127        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
     1128        pUVCpu->vm.s.cHalts = 0;
     1129    }
     1130    pUVCpu->vm.s.cHalts++;
    10471131
    10481132    /*
    10491133     * Do the halt.
    10501134     */
    1051     int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, pVCpu, fMask, u64Now);
     1135    PUVM pUVM = pUVCpu->pUVM;
     1136    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
    10521137
    10531138    /*
     
    10691154 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
    10701155 *          case an appropriate status code is returned.
    1071  * @param   pUVM            Pointer to the user mode VM structure.
     1156 * @param   pUVCpu          Pointer to the user mode VMCPU structure.
    10721157 * @thread  The emulation thread.
    10731158 */
    1074 VMMR3DECL(int) VMR3WaitU(PUVM pUVM)
     1159VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
    10751160{
    10761161    LogFlow(("VMR3WaitU:\n"));
     
    10791164     * Check Relevant FFs.
    10801165     */
    1081     PVM    pVM   = pUVM->pVM;
     1166    PVM    pVM   = pUVCpu->pVM;
     1167    PVMCPU pVCpu = pUVCpu->pVCpu;
    10821168
    10831169    if (    pVM
    10841170        &&  (   VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
    1085              || VMCPU_FF_ISPENDING(VMMGetCpu(pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
     1171             || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
    10861172            )
    10871173        )
     
    10951181     * doesn't have to special case anything).
    10961182     */
    1097     int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
     1183    PUVM pUVM = pUVCpu->pUVM;
     1184    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
    10981185    LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fGlobalForcedActions : 0));
    10991186    return rc;
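
    With the halt-method callbacks (pfnHalt/pfnWait/pfnNotifyFF in the g_aHaltMethods table above) now keyed on PUVMCPU, each EMT blocks on its own vm.s.EventSemWait and can be woken individually. The two sides of that handshake, as a hedged sketch -- all names are from the hunks above, but the pairing shown is the intent rather than literal code:

        /* Producer thread: publish work for one CPU, then wake its EMT.
         * The notify call lands in the active halt method's pfnNotifyFF,
         * which signals pUVCpu->vm.s.EventSemWait (default/bootstrap
         * methods) or issues VMMR0_DO_GVMM_SCHED_WAKE_UP (global-1). */
        static void exampleWakeEmt(PVMCPU pVCpu)
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
            VMR3NotifyCpuFF(pVCpu, false /* fNotifiedREM */);
        }

        /* Consumer side, inside vmR3EmulationThread()'s loop (see the hunk
         * above):
         *     rc = VMR3WaitU(pUVCpu);
         * which re-checks the FFs and then lets pfnWait block on the CPU's
         * own EventSemWait (1000 ms timeout in the default/bootstrap methods). */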
  • trunk/src/VBox/VMM/VMInternal.h

    r18645 r19217  
    262262    PSUPDRVSESSION                  pSession;
    263263
     264    /** Force EMT to terminate. */
     265    bool volatile                   fTerminateEMT;
     266    /** If set the EMT does the final VM cleanup when it exits.
     267     * If clear the VMR3Destroy() caller does so. */
     268    bool                            fEMTDoesTheCleanup;
     269
     270    /** List of registered reset callbacks. */
     271    PVMATRESET                      pAtReset;
     272    /** List of registered reset callbacks. */
     273    PVMATRESET                     *ppAtResetNext;
     274
     275    /** List of registered state change callbacks. */
     276    PVMATSTATE                      pAtState;
     277    /** List of registered state change callbacks. */
     278    PVMATSTATE                     *ppAtStateNext;
     279
     280    /** List of registered error callbacks. */
     281    PVMATERROR                      pAtError;
     282    /** List of registered error callbacks. */
     283    PVMATERROR                     *ppAtErrorNext;
     284
     285    /** List of registered error callbacks. */
     286    PVMATRUNTIMEERROR               pAtRuntimeError;
     287    /** List of registered error callbacks. */
     288    PVMATRUNTIMEERROR              *ppAtRuntimeErrorNext;
     289
     290    /** @name Generic Halt data
     291     * @{
     292     */
     293    /** The current halt method.
     294     * Can be selected by CFGM option 'VM/HaltMethod'. */
     295    VMHALTMETHOD                    enmHaltMethod;
     296    /** The index into g_aHaltMethods of the current halt method. */
     297    uint32_t volatile               iHaltMethod;
     298    /** @} */
     299
     300    union
     301    {
     302       /**
     303        * Method 1 & 2 - Block whenever possible, and when lagging behind
     304        * switch to spinning with regular blocking every 5-200ms (defaults)
     305        * depending on the accumulated lag. The blocking interval is adjusted
     306        * with the average oversleeping of the last 64 times.
     307        *
     308        * The difference between 1 and 2 is that we use native absolute
     309        * time APIs for the blocking instead of the millisecond based IPRT
     310        * interface.
     311        */
     312        struct
     313        {
     314            /** The max interval without blocking (when spinning). */
     315            uint32_t                u32MinBlockIntervalCfg;
     316            /** The minimum interval between blocking (when spinning). */
     317            uint32_t                u32MaxBlockIntervalCfg;
     318            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
     319            uint32_t                u32LagBlockIntervalDivisorCfg;
     320            /** When to start spinning (lag / nano secs). */
     321            uint32_t                u32StartSpinningCfg;
     322            /** When to stop spinning (lag / nano secs). */
     323            uint32_t                u32StopSpinningCfg;
     324        }                           Method12;
     325    }                               Halt;
     326
     327    /** Pointer to the DBGC instance data. */
     328    void                           *pvDBGC;
     329
     330    /** TLS index for the VMINTUSERPERVMCPU pointer. */
     331    RTTLS                           idxTLS;
     332} VMINTUSERPERVM;
     333
     334/** Pointer to the VM internal data kept in the UVM. */
     335typedef VMINTUSERPERVM *PVMINTUSERPERVM;
     336
     337
     338/**
     339 * VMCPU internal data kept in the UVM.
     340 *
     341 * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
     342 */
     343typedef struct VMINTUSERPERVMCPU
     344{
     345    /** Head of the request queue. Atomic. */
     346    volatile PVMREQ                 pReqs;
     347
     348    /** The handle to the EMT thread. */
     349    RTTHREAD                        ThreadEMT;
     350    /** The native handle of the EMT thread. */
     351    RTNATIVETHREAD                  NativeThreadEMT;
    264352    /** Wait event semaphore. */
    265353    RTSEMEVENT                      EventSemWait;
     
    275363     * @{
    276364     */
    277     /** The current halt method.
    278      * Can be selected by CFGM option 'VM/HaltMethod'. */
    279     VMHALTMETHOD                    enmHaltMethod;
    280     /** The index into g_aHaltMethods of the current halt method. */
    281     uint32_t volatile               iHaltMethod;
    282365    /** The average time (ns) between two halts in the last second. (updated once per second) */
    283366    uint32_t                        HaltInterval;
     
    320403             * This is 0 when we're not spinning. */
    321404            uint64_t                u64StartSpinTS;
    322 
    323             /** The minimum interval between blocking (when spinning). */
    324             uint32_t                u32MinBlockIntervalCfg;
    325             /** The maximum interval between blocking (when spinning). */
    326             uint32_t                u32MaxBlockIntervalCfg;
    327             /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
    328             uint32_t                u32LagBlockIntervalDivisorCfg;
    329             /** When to start spinning (lag / nano secs). */
    330             uint32_t                u32StartSpinningCfg;
    331             /** When to stop spinning (lag / nano secs). */
    332             uint32_t                u32StopSpinningCfg;
    333405        }                           Method12;
    334406
     
    376448    /** @} */
    377449
    378 
    379     /** List of registered reset callbacks. */
    380     PVMATRESET                      pAtReset;
    381     /** List of registered reset callbacks. */
    382     PVMATRESET                     *ppAtResetNext;
    383 
    384     /** List of registered state change callbacks. */
    385     PVMATSTATE                      pAtState;
    386     /** List of registered state change callbacks. */
    387     PVMATSTATE                     *ppAtStateNext;
    388 
    389     /** List of registered error callbacks. */
    390     PVMATERROR                      pAtError;
    391     /** List of registered error callbacks. */
    392     PVMATERROR                     *ppAtErrorNext;
    393 
    394     /** List of registered error callbacks. */
    395     PVMATRUNTIMEERROR               pAtRuntimeError;
    396     /** List of registered error callbacks. */
    397     PVMATRUNTIMEERROR              *ppAtRuntimeErrorNext;
    398 
    399     /** Pointer to the DBGC instance data. */
    400     void                           *pvDBGC;
    401 
    402     /** TLS index for the VMINTUSERPERVMCPU pointer. */
    403     RTTLS                           idxTLS;
    404 } VMINTUSERPERVM;
    405 
    406 /** Pointer to the VM internal data kept in the UVM. */
    407 typedef VMINTUSERPERVM *PVMINTUSERPERVM;
    408 
    409 
    410 /**
    411  * VMCPU internal data kept in the UVM.
    412  *
    413  * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
    414  */
    415 typedef struct VMINTUSERPERVMCPU
    416 {
    417     /** Head of the request queue. Atomic. */
    418     volatile PVMREQ                 pReqs;
    419 
    420     /** The handle to the EMT thread. */
    421     RTTHREAD                        ThreadEMT;
    422     /** The native handle of the EMT thread. */
    423     RTNATIVETHREAD                  NativeThreadEMT;
    424     /** Wait event semaphore. */
    425     RTSEMEVENT                      EventSemWait;
    426     /** Wait/Idle indicator. */
    427     bool volatile                   fWait;
    428     /** Force EMT to terminate. */
    429     bool volatile                   fTerminateEMT;
    430     /** If set the EMT does the final VM cleanup when it exits.
    431      * If clear the VMR3Destroy() caller does so. */
    432     bool                            fEMTDoesTheCleanup;
    433 
    434     /** @name Generic Halt data
    435      * @{
    436      */
    437     /** The current halt method.
    438      * Can be selected by CFGM option 'VM/HaltMethod'. */
    439     VMHALTMETHOD                    enmHaltMethod;
    440     /** The index into g_aHaltMethods of the current halt method. */
    441     uint32_t volatile               iHaltMethod;
    442     /** The average time (ns) between two halts in the last second. (updated once per second) */
    443     uint32_t                        HaltInterval;
    444     /** The average halt frequency for the last second. (updated once per second) */
    445     uint32_t                        HaltFrequency;
    446     /** The number of halts in the current period. */
    447     uint32_t                        cHalts;
    448     uint32_t                        padding; /**< alignment padding. */
    449     /** When we started counting halts in cHalts (RTTimeNanoTS). */
    450     uint64_t                        u64HaltsStartTS;
    451     /** @} */
    452 
    453     /** Union containing data and config for the different halt algorithms. */
    454     union
    455     {
    456        /**
    457         * Method 1 & 2 - Block whenever possible, and when lagging behind
    458         * switch to spinning with regular blocking every 5-200ms (defaults)
    459         * depending on the accumulated lag. The blocking interval is adjusted
    460         * with the average oversleeping of the last 64 times.
    461         *
    462         * The difference between 1 and 2 is that we use native absolute
    463         * time APIs for the blocking instead of the millisecond based IPRT
    464         * interface.
    465         */
    466         struct
    467         {
    468             /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
    469             uint32_t                cBlocks;
    470             /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
    471             uint64_t                cNSBlockedTooLongAvg;
    472             /** Total time spent oversleeping when blocking. */
    473             uint64_t                cNSBlockedTooLong;
    474             /** Total time spent blocking. */
    475             uint64_t                cNSBlocked;
    476             /** The timestamp (RTTimeNanoTS) of the last block. */
    477             uint64_t                u64LastBlockTS;
    478 
    479             /** When we started spinning relentlessly in order to catch up some of the oversleeping.
    480              * This is 0 when we're not spinning. */
    481             uint64_t                u64StartSpinTS;
    482 
    483             /** The minimum interval between blocking (when spinning). */
    484             uint32_t                u32MinBlockIntervalCfg;
    485             /** The maximum interval between blocking (when spinning). */
    486             uint32_t                u32MaxBlockIntervalCfg;
    487             /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
    488             uint32_t                u32LagBlockIntervalDivisorCfg;
    489             /** When to start spinning (lag / nano secs). */
    490             uint32_t                u32StartSpinningCfg;
    491             /** When to stop spinning (lag / nano secs). */
    492             uint32_t                u32StopSpinningCfg;
    493         }                           Method12;
    494 
    495 #if 0
    496        /**
    497         * Method 3 & 4 - Same as methods 1 & 2 respectively, except that we
    498         * sprinkle them with yields.
    499         */
    500        struct
    501        {
    502            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS have been accumulating. */
    503            uint32_t                 cBlocks;
    504            /** Avg. time spent oversleeping when blocking. (Re-calculated every so often.) */
    505            uint64_t                 cBlockedTooLongNSAvg;
    506            /** Total time spent oversleeping when blocking. */
    507            uint64_t                 cBlockedTooLongNS;
    508            /** Total time spent blocking. */
    509            uint64_t                 cBlockedNS;
    510            /** The timestamp (RTTimeNanoTS) of the last block. */
    511            uint64_t                 u64LastBlockTS;
    512 
    513            /** How many times we've yielded while cYieldedNS and cYieldTooLongNS have been accumulating. */
    514            uint32_t                 cYields;
    515            /** Avg. time spent oversleeping when yielding. */
    516            uint32_t                 cYieldTooLongNSAvg;
    517            /** Total time spent oversleeping when yielding. */
    518            uint64_t                 cYieldTooLongNS;
    519            /** Total time spent yielding. */
    520            uint64_t                 cYieldedNS;
    521            /** The timestamp (RTTimeNanoTS) of the last yield. */
    522            uint64_t                 u64LastYieldTS;
    523 
    524            /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
    525            uint64_t                 u64StartSpinTS;
    526        }                            Method34;
    527 #endif
    528     }                               Halt;
    529 
    530     /** Profiling the halted state; yielding vs blocking.
    531      * @{ */
    532     STAMPROFILE                     StatHaltYield;
    533     STAMPROFILE                     StatHaltBlock;
    534     STAMPROFILE                     StatHaltTimers;
    535     STAMPROFILE                     StatHaltPoll;
    536     /** @} */
    537 
    538     /** Pointer to the DBGC instance data. */
    539     void                           *pvDBGC;
    540 
    541450    /** vmR3EmulationThread longjmp buffer. Must be last in the structure. */
    542451    jmp_buf                         emtJumpEnv;
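
The Method 1 & 2 doc comment above packs the whole halt heuristic into a few lines: block normally until the accumulated timer lag passes a threshold, then alternate spinning with short bounded blocks whose length is derived from the lag. Below is a minimal standalone sketch of that interval selection in plain C, with hypothetical names (the real logic lives elsewhere in the VMM and also corrects for the measured oversleep of the last 64 blocks, which is omitted here; a non-zero divisor is assumed):

    #include <stdint.h>

    /* Hypothetical mirror of the Method12 configuration shown above; in the
     * real structure these are the u32*Cfg members tuned via CFGM. */
    typedef struct MINIHALTCFG
    {
        uint32_t u32MinBlockIntervalCfg;        /* shortest block when spinning (ns) */
        uint32_t u32MaxBlockIntervalCfg;        /* longest block when spinning (ns)  */
        uint32_t u32LagBlockIntervalDivisorCfg; /* lag / divisor = raw interval      */
        uint32_t u32StartSpinningCfg;           /* lag (ns) that triggers spinning   */
        uint32_t u32StopSpinningCfg;            /* lag (ns) that ends spinning       */
    } MINIHALTCFG;

    /* Decide how long to block given the current timer lag (ns).  Returns 0
     * for "block normally" (no deadline); otherwise a bounded interval, the
     * "spinning with regular blocking" mode described in the comment. */
    static uint32_t miniHaltPickInterval(const MINIHALTCFG *pCfg, uint64_t cNsLag,
                                         int *pfSpinning)
    {
        if (!*pfSpinning)
        {
            if (cNsLag <= pCfg->u32StartSpinningCfg)
                return 0;                       /* not lagging: plain blocking */
            *pfSpinning = 1;                    /* lag built up: start spinning */
        }
        else if (cNsLag <= pCfg->u32StopSpinningCfg)
        {
            *pfSpinning = 0;                    /* caught up: back to blocking */
            return 0;
        }

        /* Scale the lag down, then clamp to the configured window. */
        uint32_t cNsInterval = (uint32_t)(cNsLag / pCfg->u32LagBlockIntervalDivisorCfg);
        if (cNsInterval < pCfg->u32MinBlockIntervalCfg)
            cNsInterval = pCfg->u32MinBlockIntervalCfg;
        else if (cNsInterval > pCfg->u32MaxBlockIntervalCfg)
            cNsInterval = pCfg->u32MaxBlockIntervalCfg;
        return cNsInterval;
    }

The clamp between the two bounds is what the comment's "regular blocking every 5-200ms (defaults)" refers to.
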
  • trunk/src/VBox/VMM/VMMAll/PDMAllQueue.cpp

    r19141 r19217  
    9494#ifdef IN_RING3
    9595        REMR3NotifyQueuePending(pVM); /** @todo r=bird: we can remove REMR3NotifyQueuePending and let VMR3NotifyFF do the work. */
    96         VMR3NotifyFF(pVM, true);
     96        VMR3NotifyGlobalFF(pVM, true);
    9797#endif
    9898    }
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r19032 r19217  
    146146#ifdef IN_RING3
    147147        REMR3NotifyTimerPending(pVM);
    148         VMR3NotifyFF(pVM, true);
     148        VMR3NotifyGlobalFF(pVM, true);
    149149#endif
    150150    }
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r19141 r19217  
    348348#ifdef IN_RING3
    349349            REMR3NotifyTimerPending(pVM);
    350             VMR3NotifyFF(pVM, true);
     350            VMR3NotifyGlobalFF(pVM, true);
    351351#endif
    352352        }
     
    422422#ifdef IN_RING3
    423423            REMR3NotifyTimerPending(pVM);
    424             VMR3NotifyFF(pVM, true);
     424            VMR3NotifyGlobalFF(pVM, true);
    425425#endif
    426426            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     
    499499#ifdef IN_RING3
    500500                REMR3NotifyTimerPending(pVM);
    501                 VMR3NotifyFF(pVM, true);
     501                VMR3NotifyGlobalFF(pVM, true);
    502502#endif
    503503                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
     
    526526#ifdef IN_RING3
    527527                REMR3NotifyTimerPending(pVM);
    528                 VMR3NotifyFF(pVM, true);
     528                VMR3NotifyGlobalFF(pVM, true);
    529529#endif
    530530                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
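
The hunks above in PDMAllQueue.cpp, TMAll.cpp and TMAllVirtual.cpp are all the same mechanical rename: expired timers and pending PDM queues are VM-wide events, so they now go through VMR3NotifyGlobalFF instead of the old single-EMT VMR3NotifyFF. A toy illustration of the global/per-CPU split in plain C11 with made-up types (the VMM itself does the flag side with ASMAtomicOrU32 on fGlobalForcedActions and fLocalForcedActions, as in the VM_FF_SET/VMCPU_FF_SET macros):

    #include <stdatomic.h>

    #define MINI_CPUS_MAX   16

    typedef struct MINIVMCPU { atomic_uint fLocalForcedActions; } MINIVMCPU;
    typedef struct MINIVM
    {
        atomic_uint fGlobalForcedActions;
        unsigned    cCpus;
        MINIVMCPU   aCpus[MINI_CPUS_MAX];
    } MINIVM;

    /* VM-wide event (expired timer, pending PDM queue): set the global flag
     * and wake every halted EMT; this is the VMR3NotifyGlobalFF case. */
    static void miniNotifyGlobalFF(MINIVM *pVM, unsigned fFlags)
    {
        atomic_fetch_or(&pVM->fGlobalForcedActions, fFlags);
        for (unsigned i = 0; i < pVM->cCpus; i++)
        {
            /* wake aCpus[i]'s EMT here (halt semaphore / event). */
        }
    }

    /* Per-CPU event (a request aimed at one EMT): set only that CPU's flag
     * and wake only its thread; this is the VMR3NotifyCpuFF case. */
    static void miniNotifyCpuFF(MINIVM *pVM, unsigned idCpu, unsigned fFlags)
    {
        atomic_fetch_or(&pVM->aCpus[idCpu].fLocalForcedActions, fFlags);
        /* wake aCpus[idCpu]'s EMT here. */
    }
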
  • trunk/src/VBox/VMM/VMReq.cpp

    r19179 r19217  
    401401                    VERR_VM_REQUEST_INVALID_TYPE);
    402402    AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
    403     AssertMsgReturn(enmDest == VMREQDEST_ANY || enmDest == VMREQDEST_BROADCAST || (unsigned)enmDest < pUVM->pVM->cCPUs, ("Invalid destination %d (max=%d)\n", enmDest, pUVM->pVM->cCPUs), VERR_INVALID_PARAMETER);
     403    AssertMsgReturn(enmDest == VMREQDEST_ANY || enmDest == VMREQDEST_BROADCAST || (unsigned)enmDest < pUVM->cCpus, ("Invalid destination %d (max=%d)\n", enmDest, pUVM->cCpus), VERR_INVALID_PARAMETER);
    404404
    405405    /*
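
The validation hunk above switches from pUVM->pVM->cCPUs to the new pUVM->cCpus, so a request destination can be checked before the shared VM structure exists. A small sketch of the check with hypothetical sentinel values (the actual VMREQDEST constants may differ):

    /* Hypothetical mirror of the destination check: two special sentinels,
     * anything else must be a valid virtual CPU index. */
    enum { MINIDEST_ANY = -1, MINIDEST_BROADCAST = -2 };

    static int miniValidDest(int enmDest, unsigned cCpus)
    {
        return enmDest == MINIDEST_ANY
            || enmDest == MINIDEST_BROADCAST
            || (unsigned)enmDest < cCpus;   /* unicast to one EMT */
    }

The unsigned cast makes both negative sentinels fail the range test, so they are accepted only by the explicit equality checks, just as in the AssertMsgReturn above.
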
     
    614614                    VERR_VM_REQUEST_INVALID_TYPE);
    615615
    616 /** @todo SMP: Temporary hack until the unicast and broadcast cases have been
    617  *        implemented correctly below. It asserts + hangs now. */
    618 if (pReq->enmDest != VMREQDEST_ANY)
    619     pReq->enmDest = VMREQDEST_ANY;
    620 
    621 
    622616    /*
    623617     * Are we the EMT or not?
     
    632626        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;     /* volatile paranoia */
    633627
    634         for (unsigned i=0;i<pUVM->pVM->cCPUs;i++)
     628        for (unsigned i=0;i<pUVM->cCpus;i++)
    635629        {
    636630            PVMCPU pVCpu = &pUVM->pVM->aCpus[i];
     
    655649                if (pUVM->pVM)
    656650                    VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
    657                 /* @todo: VMR3NotifyFFU*/
    658                 AssertFailed();
    659                 VMR3NotifyFFU(pUVM, false);
     651                VMR3NotifyCpuFFU(pUVCpu, false);
    660652
    661653                /*
     
    685677        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;     /* volatile paranoia */
    686678
     679        /* Fetch the right UVMCPU */
     680        pUVCpu = &pUVM->aCpus[idTarget];
     681
    687682        /*
    688683         * Insert it.
     
    701696        if (pUVM->pVM)
    702697            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
    703         /* @todo: VMR3NotifyFFU*/
    704         AssertFailed();
    705         VMR3NotifyFFU(pUVM, false);
     698        VMR3NotifyCpuFFU(pUVCpu, false);
    706699
    707700        /*
     
    733726        if (pUVM->pVM)
    734727            VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
    735         VMR3NotifyFFU(pUVM, false);
     728        VMR3NotifyGlobalFFU(pUVM, false);
    736729
    737730        /*
     
    840833     * Process loop.
    841834     *
    842      * We do not repeat the outer loop if we've got an informationtional status code
     835     * We do not repeat the outer loop if we've got an informational status code
    843836     * since that code needs processing by our caller.
    844837     */
     
    860853            ppReqs = (void * volatile *)&pUVM->aCpus[enmDest].vm.s.pReqs;
    861854            if (RT_LIKELY(pUVM->pVM))
    862             {
    863                 PVMCPU pVCpu = &pUVM->pVM->aCpus[enmDest];
    864 
    865                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_REQUEST);
    866             }
     855                VMCPU_FF_CLEAR(&pUVM->pVM->aCpus[enmDest], VMCPU_FF_REQUEST);
    867856        }
    868857
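
On the consumer side, the process-loop hunk above clears VMCPU_FF_REQUEST before draining the queue. That ordering means a producer racing with the drain either gets its packet into the detached batch or re-raises the flag for another pass, so nothing is lost. A self-contained sketch of such a drain (plain C11, hypothetical types matching the push sketch earlier):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct MINIREQ
    {
        struct MINIREQ *pNext;
        void          (*pfnWorker)(void *pvUser);
        void           *pvUser;
    } MINIREQ;

    /* Drain one request queue.  Clearing the flag *first* mirrors the
     * VMCPU_FF_CLEAR above: a packet queued after the clear simply sets
     * the flag again and is picked up on the next outer-loop iteration. */
    static void miniReqDrain(_Atomic(MINIREQ *) *ppHead,
                             atomic_uint *pfForcedActions, unsigned fReqFlag)
    {
        atomic_fetch_and(pfForcedActions, ~fReqFlag);   /* FF clear first */

        /* Detach the whole LIFO in one atomic exchange. */
        MINIREQ *pBatch = atomic_exchange(ppHead, NULL);

        /* The LIFO comes out newest-first; reverse it so requests run in
         * submission order. */
        MINIREQ *pOrdered = NULL;
        while (pBatch)
        {
            MINIREQ *pNext = pBatch->pNext;
            pBatch->pNext  = pOrdered;
            pOrdered       = pBatch;
            pBatch         = pNext;
        }
        while (pOrdered)
        {
            MINIREQ *pReq = pOrdered;
            pOrdered = pOrdered->pNext;
            pReq->pfnWorker(pReq->pvUser);              /* execute the packet */
        }
    }
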