VirtualBox

Changeset 19434 in vbox


Ignore:
Timestamp:
May 6, 2009 1:58:35 PM (16 years ago)
Author:
vboxsync
Message:

Further splitup of VMM (ring 0 jump buffer).

Location:
trunk
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vm.h

    r19423 r19434  
    161161    } tm;
    162162
    163     /** VMM part.
    164      * @todo Combine this with other tiny structures. */
     163    /** VMM part. */
    165164    union
    166165    {
     
    168167        struct VMMCPU       s;
    169168#endif
    170         char                padding[64];        /* multiple of 64 */
     169        char                padding[256];       /* multiple of 64 */
    171170    } vmm;
    172171
  • trunk/src/VBox/VMM/VMM.cpp

    r19366 r19434  
    107107static DECLCALLBACK(int)    vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
    108108static DECLCALLBACK(void)   vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
    109 static int                  vmmR3ServiceCallHostRequest(PVM pVM);
     109static int                  vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu);
    110110static DECLCALLBACK(void)   vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    111111
     
    125125     */
    126126    AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
    127     AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
    128               ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
    129                sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
     127    AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
     128    AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));
    130129
    131130    /*
     
    218217static int vmmR3InitStacks(PVM pVM)
    219218{
    220     /** @todo SMP: One stack per vCPU. */
     219    int rc = VINF_SUCCESS;
     220
     221    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
     222    {
     223        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     224
    221225#ifdef VBOX_STRICT_VMM_STACK
    222     int rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
     226        rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVCpu->vmm.s.pbEMTStackR3);
    223227#else
    224     int rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
    225 #endif
    226     if (RT_SUCCESS(rc))
    227     {
     228        rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVCpu->vmm.s.pbEMTStackR3);
     229#endif
     230        if (RT_SUCCESS(rc))
     231        {
    228232#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    229         /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
    230         if (!VMMIsHwVirtExtForced(pVM))
    231             pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = NIL_RTR0PTR;
    232         else
    233 #endif
    234             pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVM->vmm.s.pbEMTStackR3);
    235         pVM->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
    236         pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
    237         AssertRelease(pVM->vmm.s.pbEMTStackRC);
    238 
    239         for (unsigned i=0;i<pVM->cCPUs;i++)
    240         {
    241             PVMCPU pVCpu = &pVM->aCpus[i];
    242             CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);
     233            /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
     234            if (!VMMIsHwVirtExtForced(pVM))
     235                pVCpu->vmm.s.CallHostR0JmpBuf.pvSavedStack = NIL_RTR0PTR;
     236            else
     237#endif
     238                pVCpu->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
     239            pVCpu->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
     240            pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
     241            AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
     242
     243            CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
    243244        }
    244245    }
     
    389390VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
    390391{
     392    int rc = VINF_SUCCESS;
     393
     394    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
     395    {
     396        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     397
    391398#ifdef VBOX_STRICT_VMM_STACK
    392     /*
    393      * Two inaccessible pages on each side of the stack to catch over/under-flows.
    394      */
    395     memset(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
    396     PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE), PAGE_SIZE, 0);
    397     RTMemProtect(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
    398 
    399     memset(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
    400     PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE), PAGE_SIZE, 0);
    401     RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
    402 #endif
    403 
    404     /*
    405      * Set page attributes to r/w for stack pages.
    406      */
    407     int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
    408     AssertRC(rc);
     399        /*
     400        * Two inaccessible pages on each side of the stack to catch over/under-flows.
     401         */
     402        memset(pVCpu->vmm.s.pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
     403        PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3 - PAGE_SIZE), PAGE_SIZE, 0);
     404        RTMemProtect(pVCpu->vmm.s.pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
     405
     406        memset(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
     407        PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE), PAGE_SIZE, 0);
     408        RTMemProtect(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
     409#endif
     410
     411        /*
     412        * Set page attributes to r/w for stack pages.
     413        */
     414        rc = PGMMapSetPage(pVM, pVCpu->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
     415        AssertRC(rc);
     416        if (RT_FAILURE(rc))
     417            break;
     418    }
    409419    if (RT_SUCCESS(rc))
    410420    {
     
    437447VMMR3DECL(int) VMMR3InitR0(PVM pVM)
    438448{
    439     int rc;
     449    int    rc;
     450    PVMCPU pVCpu = VMMGetCpu(pVM);
     451    Assert(pVCpu && pVCpu->idCpu == 0);
    440452
    441453    /*
     
    466478        if (rc != VINF_VMM_CALL_HOST)
    467479            break;
    468         rc = vmmR3ServiceCallHostRequest(pVM);
     480        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    469481        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    470482            break;
     
    491503{
    492504    PVMCPU pVCpu = VMMGetCpu(pVM);
    493     Assert(pVCpu);
     505    Assert(pVCpu && pVCpu->idCpu == 0);
    494506
    495507    /* In VMX mode, there's no need to init RC. */
     
    510522    {
    511523        CPUMHyperSetCtxCore(pVCpu, NULL);
    512         CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     524        CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    513525        uint64_t u64TS = RTTimeProgramStartNanoTS();
    514526        CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32));    /* Param 3: The program startup TS - Hi. */
     
    543555            if (rc != VINF_VMM_CALL_HOST)
    544556                break;
    545             rc = vmmR3ServiceCallHostRequest(pVM);
     557            rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    546558            if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    547559                break;
     
    568580VMMR3DECL(int) VMMR3Term(PVM pVM)
    569581{
     582    PVMCPU pVCpu = VMMGetCpu(pVM);
     583    Assert(pVCpu && pVCpu->idCpu == 0);
     584
    570585    /*
    571586     * Call Ring-0 entry with termination code.
     
    585600        if (rc != VINF_VMM_CALL_HOST)
    586601            break;
    587         rc = vmmR3ServiceCallHostRequest(pVM);
     602        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    588603        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    589604            break;
     
    650665
    651666        CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
    652     }
    653 
    654     pVM->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
    655     pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
     667
     668        pVCpu->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
     669        pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
     670    }
     671
    656672
    657673    /*
     
    810826    LogFlow(("vmmR3Save:\n"));
    811827
    812     /*
    813      * The hypervisor stack.
    814      * Note! See note in vmmR3Load.
    815      */
    816     SSMR3PutRCPtr(pSSM, pVM->vmm.s.pbEMTStackBottomRC);
    817 
    818828    for (unsigned i=0;i<pVM->cCPUs;i++)
    819829    {
    820830        PVMCPU pVCpu = &pVM->aCpus[i];
    821831
     832        /*
     833         * The hypervisor stack.
     834        * Note! See note in vmmR3Load.
     835         */
     836        SSMR3PutRCPtr(pSSM, pVCpu->vmm.s.pbEMTStackBottomRC);
     837
    822838        RTRCPTR RCPtrESP = CPUMGetHyperESP(pVCpu);
    823         AssertMsg(pVM->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVM->vmm.s.pbEMTStackBottomRC, RCPtrESP));
     839        AssertMsg(pVCpu->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVCpu->vmm.s.pbEMTStackBottomRC, RCPtrESP));
    824840        SSMR3PutRCPtr(pSSM, RCPtrESP);
    825     }
    826     SSMR3PutMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
     841
     842        SSMR3PutMem(pSSM, pVCpu->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
     843    }
    827844    return SSMR3PutU32(pSSM, ~0); /* terminator */
    828845}
     
    865882
    866883    /* restore the stack.  */
    867     SSMR3GetMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
     884    for (unsigned i=0;i<pVM->cCPUs;i++)
     885    {
     886        PVMCPU pVCpu = &pVM->aCpus[i];
     887
     888        SSMR3GetMem(pSSM, pVCpu->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
     889    }
    868890
    869891    /* terminator */
     
    10251047                    ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
    10261048                    : pVM->vmm.s.pfnCPUMRCResumeGuest);
    1027     CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);
     1049    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
    10281050
    10291051    /*
     
    10671089            return rc;
    10681090        }
    1069         rc = vmmR3ServiceCallHostRequest(pVM);
     1091        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    10701092        if (RT_FAILURE(rc))
    10711093            return rc;
     
    11131135            return rc;
    11141136        }
    1115         rc = vmmR3ServiceCallHostRequest(pVM);
     1137        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    11161138        if (RT_FAILURE(rc))
    11171139            return rc;
     
    11591181     */
    11601182    CPUMHyperSetCtxCore(pVCpu, NULL);
    1161     memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
    1162     CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
    1163     PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
     1183    memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
     1184    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
     1185    PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
    11641186    int i = cArgs;
    11651187    while (i-- > 0)
     
    12091231            return rc;
    12101232        }
    1211         rc = vmmR3ServiceCallHostRequest(pVM);
     1233        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    12121234        if (RT_FAILURE(rc))
    12131235            return rc;
     
    12481270        if (rc != VINF_VMM_CALL_HOST)
    12491271            break;
    1250         rc = vmmR3ServiceCallHostRequest(pVM);
     1272        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    12511273        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    12521274            break;
     
    13131335            return rc;
    13141336        }
    1315         rc = vmmR3ServiceCallHostRequest(pVM);
     1337        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
    13161338        if (RT_FAILURE(rc))
    13171339            return rc;
     
    13251347 * @returns VBox status code.
    13261348 * @param   pVM     VM handle.
     1349 * @param   pVCpu   VMCPU handle
    13271350 * @remark  Careful with critsects.
    13281351 */
    1329 static int vmmR3ServiceCallHostRequest(PVM pVM)
    1330 {
    1331     switch (pVM->vmm.s.enmCallHostOperation)
     1352static int vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu)
     1353{
     1354    switch (pVCpu->vmm.s.enmCallHostOperation)
    13321355    {
    13331356        /*
     
    13361359        case VMMCALLHOST_PDM_LOCK:
    13371360        {
    1338             pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
     1361            pVCpu->vmm.s.rcCallHost = PDMR3LockCall(pVM);
    13391362            break;
    13401363        }
     
    13461369        {
    13471370            PDMR3QueueFlushWorker(pVM, NULL);
    1348             pVM->vmm.s.rcCallHost = VINF_SUCCESS;
     1371            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
    13491372            break;
    13501373        }
     
    13551378        case VMMCALLHOST_PGM_POOL_GROW:
    13561379        {
    1357             pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
     1380            pVCpu->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
    13581381            break;
    13591382        }
     
    13641387        case VMMCALLHOST_PGM_MAP_CHUNK:
    13651388        {
    1366             pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
     1389            pVCpu->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallHostArg);
    13671390            break;
    13681391        }
     
    13731396        case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
    13741397        {
    1375             pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
     1398            pVCpu->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
    13761399            break;
    13771400        }
     
    13821405        case VMMCALLHOST_PGM_LOCK:
    13831406        {
    1384             pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
     1407            pVCpu->vmm.s.rcCallHost = PGMR3LockCall(pVM);
    13851408            break;
    13861409        }
     
    13921415        {
    13931416            REMR3ReplayHandlerNotifications(pVM);
    1394             pVM->vmm.s.rcCallHost = VINF_SUCCESS;
     1417            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
    13951418            break;
    13961419        }
     
    14011424         */
    14021425        case VMMCALLHOST_VMM_LOGGER_FLUSH:
    1403             pVM->vmm.s.rcCallHost = VINF_SUCCESS;
     1426            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
    14041427            LogAlways(("*FLUSH*\n"));
    14051428            break;
     
    14101433        case VMMCALLHOST_VM_SET_ERROR:
    14111434            VMR3SetErrorWorker(pVM);
    1412             pVM->vmm.s.rcCallHost = VINF_SUCCESS;
     1435            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
    14131436            break;
    14141437
     
    14171440         */
    14181441        case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
    1419             pVM->vmm.s.rcCallHost = VMR3SetRuntimeErrorWorker(pVM);
     1442            pVCpu->vmm.s.rcCallHost = VMR3SetRuntimeErrorWorker(pVM);
    14201443            break;
    14211444
     
    14251448         */
    14261449        case VMMCALLHOST_VM_R0_ASSERTION:
    1427             pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
    1428             pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
     1450            pVCpu->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
     1451            pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
    14291452#ifdef RT_ARCH_X86
    1430             pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
     1453            pVCpu->vmm.s.CallHostR0JmpBuf.eip = 0;
    14311454#else
    1432             pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
     1455            pVCpu->vmm.s.CallHostR0JmpBuf.rip = 0;
    14331456#endif
    14341457            LogRel((pVM->vmm.s.szRing0AssertMsg1));
     
    14401463         */
    14411464        case VMMCALLHOST_VM_R0_PREEMPT:
    1442             pVM->vmm.s.rcCallHost = VINF_SUCCESS;
     1465            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
    14431466            break;
    14441467
    14451468        default:
    1446             AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
     1469            AssertMsgFailed(("enmCallHostOperation=%d\n", pVCpu->vmm.s.enmCallHostOperation));
    14471470            return VERR_INTERNAL_ERROR;
    14481471    }
    14491472
    1450     pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
     1473    pVCpu->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
    14511474    return VINF_SUCCESS;
    14521475}
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r19358 r19434  
    2828#include "VMMInternal.h"
    2929#include <VBox/vm.h>
     30#include <VBox/vmm.h>
    3031#include <VBox/param.h>
    3132#include <VBox/hwaccm.h>
     
    4344VMMDECL(RTRCPTR) VMMGetStackRC(PVM pVM)
    4445{
    45     return (RTRCPTR)pVM->vmm.s.pbEMTStackBottomRC;
     46    PVMCPU pVCpu = VMMGetCpu(pVM);
     47    Assert(pVCpu);
     48
     49    return (RTRCPTR)pVCpu->vmm.s.pbEMTStackBottomRC;
    4650}
    4751
  • trunk/src/VBox/VMM/VMMGC/VMMGC.cpp

    r17422 r19434  
    244244VMMRCDECL(int) VMMGCCallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
    245245{
     246    PVMCPU pVCpu = VMMGetCpu0(pVM);
     247
    246248/** @todo profile this! */
    247     pVM->vmm.s.enmCallHostOperation = enmOperation;
    248     pVM->vmm.s.u64CallHostArg = uArg;
    249     pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
     249    pVCpu->vmm.s.enmCallHostOperation = enmOperation;
     250    pVCpu->vmm.s.u64CallHostArg = uArg;
     251    pVCpu->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
    250252    pVM->vmm.s.pfnGuestToHostRC(VINF_VMM_CALL_HOST);
    251     return pVM->vmm.s.rcCallHost;
     253    return pVCpu->vmm.s.rcCallHost;
    252254}
    253255
  • trunk/src/VBox/VMM/VMMGuruMeditation.cpp

    r19293 r19434  
    385385                                "!!\n"
    386386                                "%.*Rhxd\n",
    387                                 pVM->vmm.s.pbEMTStackRC, pVM->vmm.s.pbEMTStackBottomRC,
    388                                 VMM_STACK_SIZE, pVM->vmm.s.pbEMTStackR3);
     387                                pVCpu->vmm.s.pbEMTStackRC, pVCpu->vmm.s.pbEMTStackBottomRC,
     388                                VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3);
    389389            } /* !HWACCMR3IsActive */
    390390            break;
  • trunk/src/VBox/VMM/VMMInternal.h

    r19366 r19434  
    214214    R0PTRTYPE(PFNVMMSWITCHERHC) pfnHostToGuestR0;
    215215    /** @}  */
    216 
    217     /** VMM stack, pointer to the top of the stack in R3.
    218      * Stack is allocated from the hypervisor heap and is page aligned
    219      * and always writable in RC. */
    220     R3PTRTYPE(uint8_t *)        pbEMTStackR3;
    221     /** Pointer to the bottom of the stack - needed for doing relocations. */
    222     RCPTRTYPE(uint8_t *)        pbEMTStackRC;
    223     /** Pointer to the bottom of the stack - needed for doing relocations. */
    224     RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;
    225216
    226217    /** @name Logging
     
    262253    /** The timestamp of the previous yield. (nano) */
    263254    uint64_t                    u64LastYield;
    264 
    265     /** @name CallHost
    266      * @todo SMP: per vCPU
    267      * @{ */
    268     /** The pending operation. */
    269     VMMCALLHOST                 enmCallHostOperation;
    270     /** The result of the last operation. */
    271     int32_t                     rcCallHost;
    272     /** The argument to the operation. */
    273     uint64_t                    u64CallHostArg;
    274     /** The Ring-0 jmp buffer. */
    275     VMMR0JMPBUF                 CallHostR0JmpBuf;
    276     /** @} */
    277255
    278256    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     
    353331     * See VMM2VMCPU(). */
    354332    RTINT                       offVMCPU;
     333
     334    /** VMM stack, pointer to the top of the stack in R3.
     335     * Stack is allocated from the hypervisor heap and is page aligned
     336     * and always writable in RC. */
     337    R3PTRTYPE(uint8_t *)        pbEMTStackR3;
     338    /** Pointer to the bottom of the stack - needed for doing relocations. */
     339    RCPTRTYPE(uint8_t *)        pbEMTStackRC;
     340    /** Pointer to the bottom of the stack - needed for doing relocations. */
     341    RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;
     342
     343    /** @name CallHost
     344     * @{ */
     345    /** The pending operation. */
     346    VMMCALLHOST                 enmCallHostOperation;
     347    /** The result of the last operation. */
     348    int32_t                     rcCallHost;
     349    /** The argument to the operation. */
     350    uint64_t                    u64CallHostArg;
     351    /** The Ring-0 jmp buffer. */
     352    VMMR0JMPBUF                 CallHostR0JmpBuf;
     353    /** @} */
     354
    355355} VMMCPU;
    356356/** Pointer to VMMCPU. */
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r19406 r19434  
    308308VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
    309309{
     310    PVMCPU pVCpu = VMMGetCpu(pVM);
     311
    310312/** @todo profile this! */
    311     pVM->vmm.s.enmCallHostOperation = enmOperation;
    312     pVM->vmm.s.u64CallHostArg = uArg;
    313     pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
    314     int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
     313    pVCpu->vmm.s.enmCallHostOperation = enmOperation;
     314    pVCpu->vmm.s.u64CallHostArg = uArg;
     315    pVCpu->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
     316    int rc = vmmR0CallHostLongJmp(&pVCpu->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
    315317    if (rc == VINF_SUCCESS)
    316         rc = pVM->vmm.s.rcCallHost;
     318        rc = pVCpu->vmm.s.rcCallHost;
    317319    return rc;
    318320}
     
    323325 * Record return code statistics
    324326 * @param   pVM         The VM handle.
     327 * @param   pVCpu       The VMCPU handle.
    325328 * @param   rc          The status code.
    326329 */
    327 static void vmmR0RecordRC(PVM pVM, int rc)
     330static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
    328331{
    329332    /*
     
    438441            break;
    439442        case VINF_VMM_CALL_HOST:
    440             switch (pVM->vmm.s.enmCallHostOperation)
     443            switch (pVCpu->vmm.s.enmCallHostOperation)
    441444            {
    442445                case VMMCALLHOST_PDM_LOCK:
     
    590593#ifdef VBOX_WITH_STATISTICS
    591594                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
    592                 vmmR0RecordRC(pVM, rc);
     595                vmmR0RecordRC(pVM, pVCpu, rc);
    593596#endif
    594597            }
     
    623626                if (RT_SUCCESS(rc))
    624627                {
    625                     rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
     628                    rc = vmmR0CallHostSetJmp(&pVCpu->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
    626629                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
    627630                    AssertRC(rc2);
     
    639642
    640643#ifdef VBOX_WITH_STATISTICS
    641             vmmR0RecordRC(pVM, rc);
     644            vmmR0RecordRC(pVM, pVCpu, rc);
    642645#endif
    643646            /* No special action required for external interrupts, just return. */
     
    10841087            case VMMR0_DO_VMMR0_TERM:
    10851088            {
    1086                 if (!pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack)
     1089                PVMCPU pVCpu = &pVM->aCpus[idCpu];
     1090
     1091                if (!pVCpu->vmm.s.CallHostR0JmpBuf.pvSavedStack)
    10871092                    break;
    10881093
     
    10951100                Args.u64Arg = u64Arg;
    10961101                Args.pSession = pSession;
    1097                 return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
     1102                return vmmR0CallHostSetJmpEx(&pVCpu->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
    10981103            }
    10991104
     
    11411146    }
    11421147
     1148    PVMCPU pVCpu = VMMGetCpu(pVM);
     1149
    11431150    /*
    11441151     * Check that the jump buffer is armed.
    11451152     */
    11461153#ifdef RT_ARCH_X86
    1147     if (    !pVM->vmm.s.CallHostR0JmpBuf.eip
    1148         ||  pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
     1154    if (    !pVCpu->vmm.s.CallHostR0JmpBuf.eip
     1155        ||  pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
    11491156#else
    1150     if (    !pVM->vmm.s.CallHostR0JmpBuf.rip
    1151         ||  pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
     1157    if (    !pVCpu->vmm.s.CallHostR0JmpBuf.rip
     1158        ||  pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
    11521159#endif
    11531160    {
     
    12001207    if (pVM)
    12011208    {
     1209        PVMCPU pVCpu = VMMGetCpu(pVM);
     1210
    12021211#ifdef RT_ARCH_X86
    1203         if (    pVM->vmm.s.CallHostR0JmpBuf.eip
    1204             &&  !pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
     1212        if (    pVCpu->vmm.s.CallHostR0JmpBuf.eip
     1213            &&  !pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
    12051214#else
    1206         if (    pVM->vmm.s.CallHostR0JmpBuf.rip
    1207             &&  !pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
     1215        if (    pVCpu->vmm.s.CallHostR0JmpBuf.rip
     1216            &&  !pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
    12081217#endif
    12091218        {
  • trunk/src/VBox/VMM/VMMTests.cpp

    r19334 r19434  
    6464
    6565    CPUMHyperSetCtxCore(pVCpu, NULL);
    66     memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
    67     CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     66    memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
     67    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    6868    CPUMPushHyper(pVCpu, uVariation);
    6969    CPUMPushHyper(pVCpu, enmTestcase);
     
    104104
    105105    CPUMHyperSetCtxCore(pVCpu, NULL);
    106     memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
    107     CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     106    memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
     107    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    108108    CPUMPushHyper(pVCpu, uVariation);
    109109    CPUMPushHyper(pVCpu, u8Trap + VMMGC_DO_TESTCASE_TRAP_FIRST);
     
    342342         */
    343343        CPUMHyperSetCtxCore(pVCpu, NULL);
    344         CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     344        CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    345345        CPUMPushHyper(pVCpu, 0);
    346346        CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
     
    406406        {
    407407            CPUMHyperSetCtxCore(pVCpu, NULL);
    408             CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     408            CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    409409            CPUMPushHyper(pVCpu, 0);
    410410            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_NOP);
     
    540540            CPUMHyperSetCtxCore(pVCpu, NULL);
    541541
    542             CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     542            CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
    543543            CPUMPushHyper(pVCpu, 0);
    544544            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
  • trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp

    r19366 r19434  
    873873    GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuestV86);
    874874    GEN_CHECK_OFF(VMM, iLastGZRc);
    875     GEN_CHECK_OFF(VMM, pbEMTStackR3);
    876     GEN_CHECK_OFF(VMM, pbEMTStackRC);
    877     GEN_CHECK_OFF(VMM, pbEMTStackBottomRC);
     875    GEN_CHECK_OFF(VMMCPU, pbEMTStackR3);
     876    GEN_CHECK_OFF(VMMCPU, pbEMTStackRC);
     877    GEN_CHECK_OFF(VMMCPU, pbEMTStackBottomRC);
    878878    GEN_CHECK_OFF(VMM, pRCLoggerRC);
    879879    GEN_CHECK_OFF(VMM, pRCLoggerR3);
     
    884884    GEN_CHECK_OFF(VMM, cYieldResumeMillies);
    885885    GEN_CHECK_OFF(VMM, cYieldEveryMillies);
    886     GEN_CHECK_OFF(VMM, enmCallHostOperation);
    887     GEN_CHECK_OFF(VMM, rcCallHost);
    888     GEN_CHECK_OFF(VMM, u64CallHostArg);
    889     GEN_CHECK_OFF(VMM, CallHostR0JmpBuf);
    890     GEN_CHECK_OFF(VMM, CallHostR0JmpBuf.SpCheck);
    891     GEN_CHECK_OFF(VMM, CallHostR0JmpBuf.SpResume);
     886    GEN_CHECK_OFF(VMMCPU, enmCallHostOperation);
     887    GEN_CHECK_OFF(VMMCPU, rcCallHost);
     888    GEN_CHECK_OFF(VMMCPU, u64CallHostArg);
     889    GEN_CHECK_OFF(VMMCPU, CallHostR0JmpBuf);
     890    GEN_CHECK_OFF(VMMCPU, CallHostR0JmpBuf.SpCheck);
     891    GEN_CHECK_OFF(VMMCPU, CallHostR0JmpBuf.SpResume);
    892892    GEN_CHECK_OFF(VMM, StatRunRC);
    893893    GEN_CHECK_OFF(VMM, StatRZCallPGMLock);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r19405 r19434  
    196196    CHECK_MEMBER_ALIGNMENT(VM, cpum.s.GuestEntry, 64);
    197197
    198     CHECK_MEMBER_ALIGNMENT(VM, vmm.s.CallHostR0JmpBuf, 8);
     198    CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallHostR0JmpBuf, 8);
    199199    CHECK_MEMBER_ALIGNMENT(VM, vmm.s.StatRunRC, 8);
    200200    CHECK_MEMBER_ALIGNMENT(VM, StatTotalQemuToGC, 8);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette