VirtualBox

Changeset 45749 in vbox for trunk


Timestamp: Apr 26, 2013 12:14:09 AM
Author: vboxsync
Message: VMM: Introduced VMCPUSTATE_STARTED_HM for indicating that we're between HMR3Enter and HMR3Leave. Added HMIsInHwVirtCtx and VMMIsLongJumpArmed methods/macros.

Location: trunk
Files: 11 edited

Legend: lines prefixed with "+" were added in r45749, lines prefixed with "-" were removed, and unprefixed lines are unchanged context.

  • trunk/include/VBox/vmm/hm.h

    r45701 → r45749

      */
     #define HMCanEmulateIoBlockEx(a_pCtx)   (!CPUMIsGuestInPagedProtectedModeEx(a_pCtx))
    +
    +/**
    + * Checks whether we're in the special hardware virtualization context.
    + * @returns true / false.
    + * @param   a_pVCpu     The caller's cross context virtual CPU structure.
    + * @thread  EMT
    + */
    +#ifdef IN_RING0
    +# define HMIsInHwVirtCtx(a_pVCpu)       (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
    +#else
    +# define HMIsInHwVirtCtx(a_pVCpu)       (false)
    +#endif
    +

     VMMDECL(bool)                   HMIsEnabledNotMacro(PVM pVM);
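
    The macro is only meaningful on the EMT and only in ring-0 builds; elsewhere it compiles to (false). A minimal sketch of a ring-0 caller, assuming a hypothetical helper hmR0ExampleCheck that is not part of this changeset:

        #include <VBox/vmm/hm.h>   /* HMIsInHwVirtCtx */
        #include <VBox/vmm/vm.h>   /* PVMCPU, VMCPUSTATE_STARTED_HM */

        /* Hypothetical helper: act only while this EMT is between HMR3Enter
           and HMR3Leave, i.e. while its state is VMCPUSTATE_STARTED_HM. */
        static void hmR0ExampleCheck(PVMCPU pVCpu)
        {
            if (HMIsInHwVirtCtx(pVCpu))
            {
                /* The HM context for this VCPU is set up; HM-only per-CPU
                   state may safely be consulted here. */
            }
        }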
  • trunk/include/VBox/vmm/vm.h

    r45701 → r45749

         /** CPU started. */
         VMCPUSTATE_STARTED,
    -    /** Executing guest code and can be poked. */
    +    /** CPU started in HM context. */
    +    VMCPUSTATE_STARTED_HM,
    +    /** Executing guest code and can be poked (RC or STI bits of HM). */
         VMCPUSTATE_STARTED_EXEC,
         /** Executing guest code in the recompiler. */
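
    Read together with the ring-0 hunks further down, the per-VCPU state now moves through the new value roughly as follows; this flow is assembled from the hunks in this changeset (the transition into VMCPUSTATE_STARTED_EXEC is pre-existing code, not shown here):

        /*
         * VMCPUSTATE_STARTED            EMT outside the HM context
         *   -> VMCPUSTATE_STARTED_HM    VMMR0.cpp: before vmmR0CallRing3SetJmp
         *   -> VMCPUSTATE_STARTED_EXEC  world switch into guest code
         *   -> VMCPUSTATE_STARTED_HM    VM-exit: HMVMXR0/HWSVMR0/HWVMXR0 hunks
         *   -> VMCPUSTATE_STARTED       VMMR0.cpp: after HMR0Leave
         */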
  • trunk/include/VBox/vmm/vmm.h

    r45701 → r45749

    +/**
    + * Checks whether we've armed the ring-0 long jump machinery.
    + *
    + * @returns @c true / @c false
    + * @param   pVCpu           The caller's cross context virtual CPU structure.
    + * @thread  EMT
    + * @sa      VMMR0IsLongJumpArmed
    + */
    +#ifdef IN_RING0
    +# define VMMIsLongJumpArmed(a_pVCpu)                VMMR0IsLongJumpArmed(a_pVCpu)
    +#else
    +# define VMMIsLongJumpArmed(a_pVCpu)                (false)
    +#endif
    +
    +
     VMM_INT_DECL(RTRCPTR)       VMMGetStackRC(PVMCPU pVCpu);
     VMMDECL(VMCPUID)            VMMGetCpuId(PVM pVM);
    …
     VMM_INT_DECL(uint32_t)      VMMGetSvnRev(void);
     VMM_INT_DECL(VMMSWITCHER)   VMMGetSwitcher(PVM pVM);
    +VMM_INT_DECL(bool)          VMMIsInRing3Call(PVMCPU pVCpu);
     VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);
    …
     VMMR0DECL(int)      VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
     VMMR0DECL(int)      VMMR0TermVM(PVM pVM, PGVM pGVM);
    +VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
    +

     #ifdef LOG_ENABLED
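
    A sketch of how a ring-0 path might consult the new predicate before doing work that relies on the ring-3 long jump fallback; the helper name and the VERR_INTERNAL_ERROR status are illustrative assumptions, not part of this changeset:

        #include <VBox/err.h>       /* VINF_SUCCESS, VERR_INTERNAL_ERROR */
        #include <VBox/vmm/vmm.h>   /* VMMIsLongJumpArmed */

        /* Hypothetical guard: only proceed if vmmR0CallRing3SetJmp has armed
           the jump buffer, so a ring-3 call can unwind safely from here. */
        static int vmmR0ExampleNeedsRing3(PVMCPU pVCpu)
        {
            if (!VMMIsLongJumpArmed(pVCpu))   /* compiles to (false) outside ring-0 */
                return VERR_INTERNAL_ERROR;
            /* ... work that may trigger a call back into ring-3 ... */
            return VINF_SUCCESS;
        }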
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r44394 → r45749

     }

    +
    +/**
    + * Checks whether we're in a ring-3 call or not.
    + *
    + * @returns true / false.
    + * @param   pVCpu               The caller's cross context VM structure.
    + * @thread  EMT
    + */
    +VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu)
    +{
    +#ifdef RT_ARCH_X86
    +    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    +#else
    +    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    +#endif
    +}
    +
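
    Note that both preprocessor branches return the same fInRing3Call field; only the instruction pointer member of the jump buffer differs by architecture (see VMMR0IsLongJumpArmed below). A sketch of a caller, with the surrounding function being a hypothetical example:

        /* Hypothetical use: skip work that must not run while this EMT is
           parked in the ring-3 half of a ring-0 call. */
        static void vmmExamplePoke(PVMCPU pVCpu)
        {
            if (!VMMIsInRing3Call(pVCpu))
            {
                /* ... the EMT is not mid ring-3 call here ... */
            }
        }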
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r45378 → r45749

             CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);

    -        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    +        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; /** @todo r=bird: Why HM_CHANGED_GUEST_CR0?? */
             Assert(!CPUMIsGuestFPUStateActive(pVCpu));
         }
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45734 → r45749

          * This is why this is done after all possible exits-to-ring-3 paths in this code.
          */
    +    /** @todo r=bird: You reverse the effect of calling PDMGetInterrupt by
    +     *        handing it over to TPRM like we do in REMR3StateBack using
    +     *        TRPMAssertTrap and the other setters. */
         rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
    …
         TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
         Assert(!(ASMGetFlags() & X86_EFL_IF));
    -    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    +    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

         /* Restore the effects of TPR patching if any. */
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r45531 → r45749

         TMNotifyEndOfExecution(pVCpu);
    -    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    +    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
         ASMSetFlags(uOldEFlags);
    …
         /* Just set the correct state here instead of trying to catch every goto above. */
    -    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
    +    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);

     #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r45655 → r45749

         TMNotifyEndOfExecution(pVCpu);
    -    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    +    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
         Assert(!(ASMGetFlags() & X86_EFL_IF));
    …
         /* Just set the correct state here instead of trying to catch every goto above. */
    -    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
    +    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);

     #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r45701 → r45749

                     if (RT_SUCCESS(rc))
                     {
    +                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    +
                         rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                         int rc2 = HMR0Leave(pVM, pVCpu);
                         AssertRC(rc2);
    +
    +                    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    +                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                     }
                     STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
    …
         return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
     }
    +
    +
    +/**
    + * Checks whether we've armed the ring-0 long jump machinery.
    + *
    + * @returns @c true / @c false
    + * @param   pVCpu           The caller's cross context virtual CPU structure.
    + * @thread  EMT
    + * @sa      VMMIsLongJumpArmed
    + */
    +VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
    +{
    +#ifdef RT_ARCH_X86
    +    return pVCpu->vmm.s.CallRing3JmpBufR0.eip
    +        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    +#else
    +    return pVCpu->vmm.s.CallRing3JmpBufR0.rip
    +        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    +#endif
    +}
    +

     /**
  • trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp

    r45645 → r45749

         if (pVM->cCpus > 1)
         {
    -        /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
    +        /* We might be holding locks here and could cause a deadlock since
    +           VMR3PowerOff rendezvous with the other CPUs. */
             rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3PowerOff, 1, pVM->pUVM);
             AssertRC(rc);
             /* Set the VCPU state to stopped here as well to make sure no
    -         * inconsistency with the EM state occurs.
    -         */
    +           inconsistency with the EM state occurs. */
             VMCPU_SET_STATE(VMMGetCpu(pVM), VMCPUSTATE_STOPPED);
             rc = VINF_EM_OFF;
  • trunk/src/VBox/VMM/VMMR3/VMEmt.cpp

    r44528 → r45749

          * Do the halt.
          */
    -    Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
    +    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
         VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
         PUVM pUVM = pUVCpu->pUVM;