VirtualBox

Changeset 19395 in vbox


Ignore:
Timestamp:
May 5, 2009 8:28:42 PM (16 years ago)
Author:
vboxsync
Message:

GVMM,VM: Register the other EMTs or we assert painfully in gvmmR0ByVMAndEMT. A couple of todos and stuff.

Location:
trunk
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/gvm.h

    r19381 r19395  
    4242typedef struct GVMCPU
    4343{
    44     /* VCPU id (0 - (pVM->cCPUs - 1) */
    45     uint32_t        idCpu;
     44    /** VCPU id (0 - (pVM->cCPUs - 1)). */
     45    VMCPUID         idCpu;
    4646
    4747    /** Handle to the EMT thread. */
     
    8383    /** The ring-0 mapping of the VM structure. */
    8484    PVM             pVM;
    85     /** Number of VCPUs (same as pVM->cCPUs) */
    86     uint32_t        cCPUs;
     85    /** Number of Virtual CPUs, i.e. how many entries there are in aCpus.
     86     * Same as PVM::cCPUs. */
     87    uint32_t        cCpus;
    8788    uint32_t        padding;
    8889
  • trunk/include/VBox/gvmm.h

    r19382 r19395  
    125125GVMMR0DECL(int)     GVMMR0QueryConfig(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value);
    126126
    127 GVMMR0DECL(int)     GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCPUs, PVM *ppVM);
     127GVMMR0DECL(int)     GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PVM *ppVM);
    128128GVMMR0DECL(int)     GVMMR0InitVM(PVM pVM);
    129129GVMMR0DECL(void)    GVMMR0DoneInitVM(PVM pVM);
    130130GVMMR0DECL(bool)    GVMMR0DoingTermVM(PVM pVM, PGVM pGVM);
    131131GVMMR0DECL(int)     GVMMR0DestroyVM(PVM pVM);
    132 GVMMR0DECL(int)     GVMMR0RegisterVCpu(PVM pVM, unsigned idCpu);
     132GVMMR0DECL(int)     GVMMR0RegisterVCpu(PVM pVM, VMCPUID idCpu);
    133133GVMMR0DECL(PGVM)    GVMMR0ByHandle(uint32_t hGVM);
    134134GVMMR0DECL(PGVM)    GVMMR0ByVM(PVM pVM);
    135 GVMMR0DECL(int)     GVMMR0ByVMAndEMT(PVM pVM, unsigned idCpu, PGVM *ppGVM);
     135GVMMR0DECL(int)     GVMMR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM);
    136136GVMMR0DECL(PVM)     GVMMR0GetVMByHandle(uint32_t hGVM);
    137137GVMMR0DECL(PVM)     GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT);
    138 GVMMR0DECL(int)     GVMMR0SchedHalt(PVM pVM, unsigned idCpu, uint64_t u64ExpireGipTime);
    139 GVMMR0DECL(int)     GVMMR0SchedWakeUp(PVM pVM, unsigned idCpu);
    140 GVMMR0DECL(int)     GVMMR0SchedPoll(PVM pVM, unsigned idCpu, bool fYield);
     138GVMMR0DECL(int)     GVMMR0SchedHalt(PVM pVM, VMCPUID idCpu, uint64_t u64ExpireGipTime);
     139GVMMR0DECL(int)     GVMMR0SchedWakeUp(PVM pVM, VMCPUID idCpu);
     140GVMMR0DECL(int)     GVMMR0SchedPoll(PVM pVM, VMCPUID idCpu, bool fYield);
    141141GVMMR0DECL(int)     GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM);
    142142GVMMR0DECL(int)     GVMMR0ResetStatistics(PCGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM);
     
    153153    PSUPDRVSESSION  pSession;
    154154    /** Number of virtual CPUs for the new VM. (IN) */
    155     uint32_t        cCPUs;
     155    uint32_t        cCpus;
    156156    /** Pointer to the ring-3 mapping of the shared VM structure on return. (OUT) */
    157157    PVMR3           pVMR3;
  • trunk/src/VBox/VMM/VM.cpp

    r19322 r19395  
    237237            /*
    238238             * Call vmR3CreateU in the EMT thread and wait for it to finish.
     239             *
     240             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
     241             *       submitting a request to a specific VCPU without a pVM. So, to make
     242             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
     243             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
    239244             */
    240245            PVMREQ pReq;
    241             rc = VMR3ReqCallU(pUVM, VMCPUID_ANY /* can't use CPU0 here as it's too early (pVM==0) */, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU,
    242                               4, pUVM, cCPUs, pfnCFGMConstructor, pvUserCFGM);
     246            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU, 4,
     247                              pUVM, cCPUs, pfnCFGMConstructor, pvUserCFGM);
    243248            if (RT_SUCCESS(rc))
    244249            {
     
    379384 *
    380385 * @returns VBox status code.
    381  * @param   cCPUs   Number of virtual CPUs
     386 * @param   cCpus   Number of virtual CPUs
    382387 * @param   ppUVM   Where to store the UVM pointer.
    383388 */
    384 static int vmR3CreateUVM(uint32_t cCPUs, PUVM *ppUVM)
     389static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM)
    385390{
    386391    uint32_t i;
     
    389394     * Create and initialize the UVM.
    390395     */
    391     PUVM pUVM = (PUVM)RTMemAllocZ(RT_OFFSETOF(UVM, aCpus[cCPUs]));
     396    PUVM pUVM = (PUVM)RTMemAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    392397    AssertReturn(pUVM, VERR_NO_MEMORY);
    393398    pUVM->u32Magic = UVM_MAGIC;
    394     pUVM->cCpus = cCPUs;
     399    pUVM->cCpus = cCpus;
    395400
    396401    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
     
    404409
    405410    /* Initialize the VMCPU array in the UVM. */
    406     for (i = 0; i < cCPUs; i++)
     411    for (i = 0; i < cCpus; i++)
    407412    {
    408413        pUVM->aCpus[i].pUVM   = pUVM;
     
    416421    {
    417422        /* Allocate a halt method event semaphore for each VCPU. */
    418         for (i = 0; i < cCPUs; i++)
     423        for (i = 0; i < cCpus; i++)
    419424        {
    420425            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
     
    440445                         * Start the emulation threads for all VMCPUs.
    441446                         */
    442                         for (i = 0; i < cCPUs; i++)
     447                        for (i = 0; i < cCpus; i++)
    443448                        {
    444449                            rc = RTThreadCreate(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
     
    467472                STAMR3TermUVM(pUVM);
    468473            }
    469             for (i = 0; i < cCPUs; i++)
     474            for (i = 0; i < cCpus; i++)
    470475            {
    471476                RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
     477                pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
    472478            }
    473479        }
     
    484490 * @thread EMT
    485491 */
    486 static int vmR3CreateU(PUVM pUVM, uint32_t cCPUs, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
     492static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
    487493{
    488494    int rc = VINF_SUCCESS;
     
    510516    CreateVMReq.pVMR0           = NIL_RTR0PTR;
    511517    CreateVMReq.pVMR3           = NULL;
    512     CreateVMReq.cCPUs           = cCPUs;
     518    CreateVMReq.cCpus           = cCpus;
    513519    rc = SUPCallVMMR0Ex(NIL_RTR0PTR, 0 /* VCPU 0 */, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    514520    if (RT_SUCCESS(rc))
     
    518524        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
    519525        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
    520         AssertRelease(pVM->cCPUs == cCPUs);
     526        AssertRelease(pVM->cCPUs == cCpus);
    521527        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
    522528
     
    571577                rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "NumCPUs", &cCPUsCfg, 1);
    572578                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
    573                 if (RT_SUCCESS(rc) && cCPUsCfg != cCPUs)
     579                if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
    574580                {
    575581                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCPUs=%RU32 does not match!\n",
    576                                            cCPUsCfg, cCPUs));
     582                                           cCPUsCfg, cCpus));
    577583                    rc = VERR_INVALID_PARAMETER;
    578584                }
     
    667673}
    668674
     675/**
     676 * Register the calling EMT with GVM.
     677 *
     678 * @returns VBox status code.
     679 * @param   pVM         The VM handle.
     680 * @param   idCpu       The Virtual CPU ID.
     681 */
     682static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
     683{
     684    Assert(VMMGetCpuId(pVM) == idCpu);
     685    int rc = SUPCallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
     686    if (RT_FAILURE(rc))
     687        LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
     688    return rc;
     689}
     690
    669691
    670692/**
     
    674696{
    675697    int rc;
     698
     699    /*
     700     * Register the other EMTs with GVM.
     701     */
     702    for (VMCPUID idCpu = 1; idCpu < pVM->cCPUs; idCpu++)
     703    {
     704        PVMREQ pReq;
     705        rc = VMR3ReqCallU(pUVM, idCpu, &pReq, RT_INDEFINITE_WAIT, 0 /*fFlags*/,
     706                          (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
     707        if (RT_SUCCESS(rc))
     708            rc = pReq->iStatus;
     709        VMR3ReqFree(pReq);
     710        if (RT_FAILURE(rc))
     711            return rc;
     712    }
    676713
    677714    /*
  • trunk/src/VBox/VMM/VMEmt.cpp

    r19300 r19395  
    4444#include <iprt/time.h>
    4545
    46 
    47 /**
    48  * The emulation thread.
     46/*******************************************************************************
     47*   Internal Functions                                                         *
     48*******************************************************************************/
     49int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);
     50
     51
     52/**
     53 * The emulation thread main function.
    4954 *
    5055 * @returns Thread exit code.
    5156 * @param   ThreadSelf  The handle to the executing thread.
    52  * @param   pvArgs      Pointer to the user mode VM structure (UVM).
     57 * @param   pvArgs      Pointer to the user mode per-VCpu structure (UVMCPU).
    5358 */
    5459DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
    5560{
    5661    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
    57     PUVM    pUVM    = pUVCpu->pUVM;
    58     RTCPUID idCpu   = pUVCpu->idCpu;
     62    return vmR3EmulationThreadWithId(ThreadSelf, pUVCpu, pUVCpu->idCpu);
     63}
     64
     65
     66/**
     67 * The emulation thread main function, with Virtual CPU ID for debugging.
     68 *
     69 * @returns Thread exit code.
     70 * @param   ThreadSelf  The handle to the executing thread.
     71 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
     72 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
     73 */
     74int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
     75{
     76    PUVM    pUVM = pUVCpu->pUVM;
    5977    int     rc;
    6078
     
    95113            }
    96114
     115            /*
     116             * Only the first VCPU may initialize the VM during early init
     117             * and must therefore service all VMCPUID_ANY requests.
     118             * See also VMR3Create.
     119             */
    97120            if (    pUVM->vm.s.pReqs
    98                 &&  pUVCpu->idCpu == 0 /* Only the first VCPU may initialize the VM during early init */)
     121                &&  pUVCpu->idCpu == 0)
    99122            {
    100123                /*
    101                  * Service execute in EMT request.
     124                 * Service execute in any EMT request.
    102125                 */
    103126                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
    104127                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
    105128            }
    106             else
    107             if (pUVCpu->vm.s.pReqs)
     129            else if (pUVCpu->vm.s.pReqs)
    108130            {
    109131                /*
    110                  * Service execute in EMT request.
     132                 * Service execute in specific EMT request.
    111133                 */
    112134                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
     
    143165            {
    144166                /*
    145                  * Service execute in EMT request.
     167                 * Service execute in any EMT request.
    146168                 */
    147169                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
     
    151173            {
    152174                /*
    153                  * Service execute in EMT request.
     175                 * Service execute in specific EMT request.
    154176                 */
    155177                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
     
    692714    PVMCPU  pVCpu = pUVCpu->pVCpu;
    693715    PVM     pVM   = pUVCpu->pVM;
     716    Assert(VMMGetCpu(pVM) == pVCpu);
    694717
    695718    /*
     
    775798    PVM    pVM   = pUVCpu->pUVM->pVM;
    776799    PVMCPU pVCpu = VMMGetCpu(pVM);
     800    Assert(pVCpu->idCpu == pUVCpu->idCpu);
    777801
    778802    int rc = VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMAll/REMAll.cpp

    r18927 r19395  
    5858    {
    5959        /* Tell the recompiler to flush its TLB. */
     60#ifndef DEBUG_bird /* temporary */
    6061        Assert(pVM->cCPUs == 1); /* @todo SMP */
     62#endif
    6163        CPUMSetChangedFlags(VMMGetCpu(pVM), CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    6264        pVM->rem.s.cInvalidatedPages = 0;
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r19385 r19395  
    189189static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);
    190190static int gvmmR0ByVM(PVM pVM, PGVM *ppGVM, PGVMM *ppGVMM, bool fTakeUsedLock);
    191 static int gvmmR0ByVMAndEMT(PVM pVM, unsigned idCpu, PGVM *ppGVM, PGVMM *ppGVMM);
     191static int gvmmR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM, PGVMM *ppGVMM);
    192192
    193193
     
    486486    pReq->pVMR0 = NULL;
    487487    pReq->pVMR3 = NIL_RTR3PTR;
    488     int rc = GVMMR0CreateVM(pReq->pSession, pReq->cCPUs, &pVM);
     488    int rc = GVMMR0CreateVM(pReq->pSession, pReq->cCpus, &pVM);
    489489    if (RT_SUCCESS(rc))
    490490    {
     
    503503 * @returns VBox status code.
    504504 * @param   pSession    The support driver session.
    505  * @param   cCPUs       Number of virtual CPUs for the new VM.
     505 * @param   cCpus       Number of virtual CPUs for the new VM.
    506506 * @param   ppVM        Where to store the pointer to the VM structure.
    507507 *
    508508 * @thread  EMT.
    509509 */
    510 GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCPUs, PVM *ppVM)
     510GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, uint32_t cCpus, PVM *ppVM)
    511511{
    512512    LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
     
    517517    *ppVM = NULL;
    518518
    519     if (    cCPUs == 0
    520         ||  cCPUs > VMCPU_MAX_CPU_COUNT)
     519    if (    cCpus == 0
     520        ||  cCpus > VMCPU_MAX_CPU_COUNT)
    521521        return VERR_INVALID_PARAMETER;
    522522
     
    571571                     * Allocate the global VM structure (GVM) and initialize it.
    572572                     */
    573                     PGVM pGVM = (PGVM)RTMemAllocZ(RT_UOFFSETOF(GVM, aCpus[cCPUs]));
     573                    PGVM pGVM = (PGVM)RTMemAllocZ(RT_UOFFSETOF(GVM, aCpus[cCpus]));
    574574                    if (pGVM)
    575575                    {
     
    577577                        pGVM->hSelf     = iHandle;
    578578                        pGVM->pVM       = NULL;
    579                         pGVM->cCPUs     = cCPUs;
     579                        pGVM->cCpus     = cCpus;
    580580
    581581                        gvmmR0InitPerVMData(pGVM);
     
    585585                         * Allocate the shared VM structure and associated page array.
    586586                         */
    587                         const uint32_t  cbVM   = RT_UOFFSETOF(VM, aCpus[cCPUs]);
     587                        const uint32_t  cbVM   = RT_UOFFSETOF(VM, aCpus[cCpus]);
    588588                        const uint32_t  cPages = RT_ALIGN_32(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
    589589                        rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
     
    597597                            pVM->hSelf      = iHandle;
    598598                            pVM->cbSelf     = cbVM;
    599                             pVM->cCPUs      = cCPUs;
     599                            pVM->cCPUs      = cCpus;
    600600                            pVM->offVMCPU   = RT_UOFFSETOF(VM, aCpus);
    601601
     
    622622
    623623                                    /* Initialize all the VM pointers. */
    624                                     for (uint32_t i = 0; i < cCPUs; i++)
     624                                    for (uint32_t i = 0; i < cCpus; i++)
    625625                                    {
    626626                                        pVM->aCpus[i].pVMR0 = pVM;
     
    709709    pGVM->gvmm.s.fDoneVMMR0Term = false;
    710710
    711     for (unsigned i=0; i< pGVM->cCPUs; i++)
     711    for (VMCPUID i = 0; i < pGVM->cCpus; i++)
    712712    {
    713713        pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
     
    732732    PGVM pGVM;
    733733    PGVMM pGVMM;
    734     int rc = gvmmR0ByVMAndEMT(pVM, 0 /* VCPU 0 */, &pGVM, &pGVMM);
     734    int rc = gvmmR0ByVMAndEMT(pVM, 0 /* idCpu */, &pGVM, &pGVMM);
    735735    if (RT_SUCCESS(rc))
    736736    {
     
    738738            && pGVM->aCpus[0].gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI)
    739739        {
    740             for (unsigned i=0; i < pGVM->cCPUs; i++)
     740            for (VMCPUID i = 0; i < pGVM->cCpus; i++)
    741741            {
    742742                rc = RTSemEventMultiCreate(&pGVM->aCpus[i].gvmm.s.HaltEventMulti);
     
    762762 *
    763763 * @param   pVM         Pointer to the shared VM structure.
     764 * @thread  EMT(0)
    764765 */
    765766GVMMR0DECL(void) GVMMR0DoneInitVM(PVM pVM)
     
    768769    PGVM pGVM;
    769770    PGVMM pGVMM;
    770     int rc = gvmmR0ByVMAndEMT(pVM, 0 /* VCPU 0 */, &pGVM, &pGVMM);
     771    int rc = gvmmR0ByVMAndEMT(pVM, 0 /* idCpu */, &pGVM, &pGVMM);
    771772    AssertRCReturnVoid(rc);
    772773
     
    782783 * @param   pVM         Pointer to the shared VM structure.
    783784 * @param   pGVM        Pointer to the global VM structure. Optional.
     785 * @thread  EMT(0)
    784786 */
    785787GVMMR0DECL(bool) GVMMR0DoingTermVM(PVM pVM, PGVM pGVM)
     
    791793    {
    792794        PGVMM pGVMM;
    793         int rc = gvmmR0ByVMAndEMT(pVM, 0 /* VCPU 0 */, &pGVM, &pGVMM);
     795        int rc = gvmmR0ByVMAndEMT(pVM, 0 /* idCpu */, &pGVM, &pGVMM);
    794796        AssertRCReturn(rc, false);
    795797    }
     
    814816 * @param   pVM         Where to store the pointer to the VM structure.
    815817 *
    816  * @thread  EMT if it's associated with the VM, otherwise any thread.
     818 * @thread  EMT(0) if it's associated with the VM, otherwise any thread.
    817819 */
    818820GVMMR0DECL(int) GVMMR0DestroyVM(PVM pVM)
     
    865867    else
    866868    {
    867         SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, hEMT=%p, .pvObj=%p} pVM=%p hSelf=%p\n",
     869        SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, hEMTCpu0=%p, .pvObj=%p} pVM=%p hSelf=%p\n",
    868870                    pHandle, pHandle->pVM, pHandle->hEMTCpu0, pHandle->pvObj, pVM, hSelf);
    869871        gvmmR0CreateDestroyUnlock(pGVMM);
     
    10191021        }
    10201022
    1021         for (unsigned i=0; i< pGVM->cCPUs; i++)
     1023        for (VMCPUID i = 0; i < pGVM->cCpus; i++)
    10221024        {
    10231025            if (pGVM->aCpus[i].gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI)
     
    10641066 * @param   idCpu           VCPU id.
    10651067 */
    1066 GVMMR0DECL(int) GVMMR0RegisterVCpu(PVM pVM, unsigned idCpu)
     1068GVMMR0DECL(int) GVMMR0RegisterVCpu(PVM pVM, VMCPUID idCpu)
    10671069{
    10681070    AssertReturn(idCpu != 0, VERR_NOT_OWNER);
     
    12161218 * @returns VBox status code.
    12171219 * @param   pVM         The shared VM structure (the ring-0 mapping).
    1218  * @param   idCpu       VCPU id
     1220 * @param   idCpu       The Virtual CPU ID of the calling EMT.
    12191221 * @param   ppGVM       Where to store the GVM pointer.
    12201222 * @param   ppGVMM      Where to store the pointer to the GVMM instance data.
    12211223 * @thread  EMT
    12221224 *
    1223  * @remark  This will assert in failure paths.
    1224  */
    1225 static int gvmmR0ByVMAndEMT(PVM pVM, unsigned idCpu, PGVM *ppGVM, PGVMM *ppGVMM)
     1225 * @remark  This will assert in all failure paths.
     1226 */
     1227static int gvmmR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM, PGVMM *ppGVMM)
    12261228{
    12271229    PGVMM pGVMM;
     
    12421244     */
    12431245    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    1244     RTNATIVETHREAD hAllegedEMT = RTThreadNativeSelf();
    12451246    AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);
    12461247    AssertPtrReturn(pHandle->pvObj, VERR_INTERNAL_ERROR);
     
    12491250    AssertPtrReturn(pGVM, VERR_INTERNAL_ERROR);
    12501251    AssertReturn(pGVM->pVM == pVM, VERR_INTERNAL_ERROR);
     1252    RTNATIVETHREAD hAllegedEMT = RTThreadNativeSelf();
     1253    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
     1254#ifdef DEBUG_bird /* did bad stuff to my box just now, take it easy. */
     1255    if (RT_UNLIKELY(pGVM->aCpus[idCpu].hEMT != hAllegedEMT))
     1256    {
     1257        SUPR0Printf("gvmmR0ByVMAndEMT: %x != %x idCpu=%u\n", pGVM->aCpus[idCpu].hEMT, hAllegedEMT, idCpu);
     1258        return VERR_INTERNAL_ERROR;
     1259    }
     1260#else
    12511261    AssertReturn(pGVM->aCpus[idCpu].hEMT == hAllegedEMT, VERR_INTERNAL_ERROR);
     1262#endif
    12521263
    12531264    *ppGVM = pGVM;
     
    12631274 * @returns VBox status code.
    12641275 * @param   pVM         The shared VM structure (the ring-0 mapping).
    1265  * @param   idCpu       VCPU id
     1276 * @param   idCpu       The Virtual CPU ID of the calling EMT.
    12661277 * @param   ppGVM       Where to store the GVM pointer.
    12671278 * @thread  EMT
    12681279 */
    1269 GVMMR0DECL(int) GVMMR0ByVMAndEMT(PVM pVM, unsigned idCpu, PGVM *ppGVM)
     1280GVMMR0DECL(int) GVMMR0ByVMAndEMT(PVM pVM, VMCPUID idCpu, PGVM *ppGVM)
    12701281{
    12711282    AssertPtrReturn(ppGVM, VERR_INVALID_POINTER);
     
    13231334            &&  VALID_PTR(pGVMM->aHandles[i].pGVM))
    13241335        {
     1336            if (pGVMM->aHandles[i].hEMTCpu0 == hEMT)
     1337                return pGVMM->aHandles[i].pVM;
     1338
     1339            /** @todo this isn't safe as GVM may be deallocated while we're running.
     1340             * Will change this to use RTPROCESS on the handle level as storing all the
     1341             * thread handles there doesn't scale very well. */
    13251342            PGVM pGVM = pGVMM->aHandles[i].pGVM;
    1326 
    1327             for (unsigned idCpu = 0; idCpu < pGVM->cCPUs; idCpu++)
     1343            for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
    13281344                if (pGVM->aCpus[idCpu].hEMT == hEMT)
    13291345                    return pGVMM->aHandles[i].pVM;
     
    13431359static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
    13441360{
     1361/** @todo Rewrite this algorithm. See performance defect XYZ. */
     1362
    13451363    /*
    13461364     * The first pass will wake up VMs which have actually expired
     
    13591377            &&  pCurGVM->u32Magic == GVM_MAGIC)
    13601378        {
    1361             for (unsigned idCpu = 0; idCpu < pCurGVM->cCPUs; idCpu++)
     1379            for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
    13621380            {
    13631381                PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
     
    13991417                &&  pCurGVM->u32Magic == GVM_MAGIC)
    14001418            {
    1401                 for (unsigned idCpu = 0; idCpu < pCurGVM->cCPUs; idCpu++)
     1419                for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
    14021420                {
    14031421                    PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
     
    14291447                &&  pCurGVM->u32Magic == GVM_MAGIC)
    14301448            {
    1431                 for (unsigned idCpu = 0; idCpu < pCurGVM->cCPUs; idCpu++)
     1449                for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
    14321450                {
    14331451                    PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
     
    14591477 *          VERR_INTERRUPTED if a signal was scheduled for the thread.
    14601478 * @param   pVM                 Pointer to the shared VM structure.
    1461  * @param   idCpu               VCPU id
     1479 * @param   idCpu               The Virtual CPU ID of the calling EMT.
    14621480 * @param   u64ExpireGipTime    The time for the sleep to expire expressed as GIP time.
    1463  * @thread  EMT.
    1464  */
    1465 GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, unsigned idCpu, uint64_t u64ExpireGipTime)
     1481 * @thread  EMT(idCpu).
     1482 */
     1483GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, VMCPUID idCpu, uint64_t u64ExpireGipTime)
    14661484{
    14671485    LogFlow(("GVMMR0SchedHalt: pVM=%p\n", pVM));
     
    14701488     * Validate the VM structure, state and handle.
    14711489     */
    1472     PGVMM   pGVMM;
    1473     PGVM    pGVM;
    1474     PGVMCPU pCurGVCpu;
    1475 
     1490    PGVM pGVM;
     1491    PGVMM pGVMM;
    14761492    int rc = gvmmR0ByVMAndEMT(pVM, idCpu, &pGVM, &pGVMM);
    14771493    if (RT_FAILURE(rc))
    14781494        return rc;
    1479 
    14801495    pGVM->gvmm.s.StatsSched.cHaltCalls++;
    14811496
    1482     pCurGVCpu = &pGVM->aCpus[idCpu];
    1483     Assert(idCpu < pGVM->cCPUs);
     1497    PGVMCPU pCurGVCpu = &pGVM->aCpus[idCpu];
     1498    Assert(idCpu < pGVM->cCpus);
    14841499    Assert(!pCurGVCpu->gvmm.s.u64HaltExpire);
    14851500
     
    15381553 *          VINF_GVM_NOT_BLOCKED if the EMT thread wasn't blocked.
    15391554 * @param   pVM                 Pointer to the shared VM structure.
    1540  * @param   idCpu               VCPU id
     1555 * @param   idCpu               The Virtual CPU ID of the EMT to wake up.
    15411556 * @thread  Any but EMT.
    15421557 */
    1543 GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM, unsigned idCpu)
     1558GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM, VMCPUID idCpu)
    15441559{
    15451560    /*
    15461561     * Validate input and take the UsedLock.
    15471562     */
    1548     PGVMM   pGVMM;
    1549     PGVM    pGVM;
    1550     PGVMCPU pCurGVCpu;
    1551 
     1563    PGVM pGVM;
     1564    PGVMM pGVMM;
    15521565    int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /* fTakeUsedLock */);
    15531566    if (RT_SUCCESS(rc))
    15541567    {
    1555         Assert(idCpu < pGVM->cCPUs);
    1556 
    1557         pCurGVCpu = &pGVM->aCpus[idCpu];
    1558 
    1559         pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
    1560 
    1561         /*
    1562          * Signal the semaphore regardless of whether it's current blocked on it.
    1563          *
    1564          * The reason for this is that there is absolutely no way we can be 100%
    1565          * certain that it isn't *about* go to go to sleep on it and just got
    1566          * delayed a bit en route. So, we will always signal the semaphore when
    1567          * the it is flagged as halted in the VMM.
    1568          */
    1569         if (pCurGVCpu->gvmm.s.u64HaltExpire)
    1570         {
    1571             rc = VINF_SUCCESS;
    1572             ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
     1568        if (idCpu < pGVM->cCpus)
     1569        {
     1570            PGVMCPU pCurGVCpu = &pGVM->aCpus[idCpu];
     1571            pGVM->gvmm.s.StatsSched.cWakeUpCalls++;
     1572
     1573            /*
     1574             * Signal the semaphore regardless of whether it's current blocked on it.
     1575             *
     1576             * The reason for this is that there is absolutely no way we can be 100%
     1577             * certain that it isn't *about* to go to sleep on it and just got
     1578             * delayed a bit en route. So, we will always signal the semaphore when
     1579             * it is flagged as halted in the VMM.
     1580             */
     1581            if (pCurGVCpu->gvmm.s.u64HaltExpire)
     1582            {
     1583                rc = VINF_SUCCESS;
     1584                ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
     1585            }
     1586            else
     1587            {
     1588                rc = VINF_GVM_NOT_BLOCKED;
     1589                pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
     1590            }
     1591
     1592            int rc2 = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
     1593            AssertRC(rc2);
     1594
     1595            /*
     1596             * While we're here, do a round of scheduling.
     1597             */
     1598            Assert(ASMGetFlags() & X86_EFL_IF);
     1599            const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
     1600            pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
     1601
    15731602        }
    15741603        else
    1575         {
    1576             rc = VINF_GVM_NOT_BLOCKED;
    1577             pGVM->gvmm.s.StatsSched.cWakeUpNotHalted++;
    1578         }
    1579 
    1580         int rc2 = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti);
    1581         AssertRC(rc2);
    1582 
    1583         /*
    1584          * While we're here, do a round of scheduling.
    1585          */
    1586         Assert(ASMGetFlags() & X86_EFL_IF);
    1587         const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
    1588         pGVM->gvmm.s.StatsSched.cWakeUpWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
    1589 
    1590 
    1591         rc2 = gvmmR0UsedUnlock(pGVMM);
     1604            rc = VERR_INVALID_CPU_ID;
     1605
     1606        int rc2 = gvmmR0UsedUnlock(pGVMM);
    15921607        AssertRC(rc2);
    15931608    }
     
    16071622 *          VINF_GVM_YIELDED if an attempt to switch to a different VM task was made.
    16081623 * @param   pVM                 Pointer to the shared VM structure.
    1609  * @param   idCpu               VCPU id
     1624 * @param   idCpu               The Virtual CPU ID of the calling EMT.
    16101625 * @param   u64ExpireGipTime    The time for the sleep to expire expressed as GIP time.
    16111626 * @param   fYield              Whether to yield or not.
    16121627 *                              This is for when we're spinning in the halt loop.
    1613  * @thread  EMT.
    1614  */
    1615 GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, unsigned idCpu, bool fYield)
     1628 * @thread  EMT(idCpu).
     1629 */
     1630GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, VMCPUID idCpu, bool fYield)
    16161631{
    16171632    /*
  • trunk/src/VBox/VMM/VMReq.cpp

    r19366 r19395  
    262262VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, va_list Args)
    263263{
    264     LogFlow(("VMR3ReqCallV: cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", cMillies, fFlags, pfnFunction, cArgs));
     264    LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));
    265265
    266266    /*
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette