Timestamp: Apr 26, 2013 12:14:09 AM
Location:  trunk
Files:     11 edited
trunk/include/VBox/vmm/hm.h
(r45701 → r45749)
      */
     #define HMCanEmulateIoBlockEx(a_pCtx)   (!CPUMIsGuestInPagedProtectedModeEx(a_pCtx))
 
+    /**
+     * Checks whether we're in the special hardware virtualization context.
+     * @returns true / false.
+     * @param   a_pVCpu     The caller's cross context virtual CPU structure.
+     * @thread  EMT
+     */
+    #ifdef IN_RING0
+    # define HMIsInHwVirtCtx(a_pVCpu)       (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
+    #else
+    # define HMIsInHwVirtCtx(a_pVCpu)       (false)
+    #endif
 
     VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM);
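The new HMIsInHwVirtCtx macro only yields a meaningful answer in ring-0; everywhere else it compiles to false. A minimal usage sketch, assuming a hypothetical ring-0 helper (hmR0DoSomething is not part of this changeset):

    /* Hypothetical ring-0 helper: may only be called on the EMT while it is
       between the VMCPUSTATE_STARTED_HM transitions this changeset adds. */
    static void hmR0DoSomething(PVMCPU pVCpu)
    {
        Assert(HMIsInHwVirtCtx(pVCpu)); /* ring-0 only; always false elsewhere */
        /* ... work that assumes the HM context ... */
    }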
trunk/include/VBox/vmm/vm.h
(r45701 → r45749)
     /** CPU started. */
     VMCPUSTATE_STARTED,
-    /** Executing guest code and can be poked. */
+    /** CPU started in HM context. */
+    VMCPUSTATE_STARTED_HM,
+    /** Executing guest code and can be poked (RC or STI bits of HM). */
     VMCPUSTATE_STARTED_EXEC,
     /** Executing guest code in the recompiler. */
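Taken together with the VMMR0.cpp, HMVMXR0.cpp and HWSVMR0.cpp hunks below, the new state slots into the EMT lifecycle roughly as follows (a sketch assembled from those hunks, not verbatim source):

    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);    /* VMMR0.cpp: entering the HM context    */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);  /* pre-existing: just before VM-entry    */
    /* ... guest executes ... */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);    /* HMVMXR0/HWSVMR0: right after VM-exit  */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);       /* VMMR0.cpp: leaving the HM context     */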
trunk/include/VBox/vmm/vmm.h
(r45701 → r45749)
+    /**
+     * Checks whether we've armed the ring-0 long jump machinery.
+     *
+     * @returns @c true / @c false
+     * @param   pVCpu   The caller's cross context virtual CPU structure.
+     * @thread  EMT
+     * @sa      VMMR0IsLongJumpArmed
+     */
+    #ifdef IN_RING0
+    # define VMMIsLongJumpArmed(a_pVCpu)    VMMR0IsLongJumpArmed(a_pVCpu)
+    #else
+    # define VMMIsLongJumpArmed(a_pVCpu)    (false)
+    #endif
+
     VMM_INT_DECL(RTRCPTR)       VMMGetStackRC(PVMCPU pVCpu);
     VMMDECL(VMCPUID)            VMMGetCpuId(PVM pVM);
 …
     VMM_INT_DECL(uint32_t)      VMMGetSvnRev(void);
     VMM_INT_DECL(VMMSWITCHER)   VMMGetSwitcher(PVM pVM);
+    VMM_INT_DECL(bool)          VMMIsInRing3Call(PVMCPU pVCpu);
     VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);
 …
     VMMR0DECL(int)      VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
     VMMR0DECL(int)      VMMR0TermVM(PVM pVM, PGVM pGVM);
+    VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
 
 #ifdef LOG_ENABLED
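Because the ring-3 variant of VMMIsLongJumpArmed deliberately expands to false, context-agnostic code can use it to guard ring-0-only fallback paths. A sketch of the intended pattern (the call site is hypothetical; VMMRZCallRing3 and VMMCALLRING3_VM_R0_ASSERTION are the existing ring-0-to-ring-3 call API and operation code):

    /* Only attempt the ring-3 round trip when the ring-0 long jump
       machinery is actually armed; in ring-3 builds this folds away. */
    if (VMMIsLongJumpArmed(pVCpu))
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0 /*uArg*/);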
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
(r44394 → r45749)
     }
 
+
+    /**
+     * Checks whether we're in a ring-3 call or not.
+     *
+     * @returns true / false.
+     * @param   pVCpu   The caller's cross context VM structure.
+     * @thread  EMT
+     */
+    VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPU pVCpu)
+    {
+    #ifdef RT_ARCH_X86
+        return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
+    #else
+        return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
+    #endif
+    }
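Both arms of the RT_ARCH_X86 conditional read the same fInRing3Call flag; the #ifdef merely mirrors the architecture-dependent layout of the jump buffer, whose instruction pointer field differs (eip vs. rip, compare VMMR0IsLongJumpArmed in the VMMR0.cpp hunk below). A minimal usage sketch (the assertion site is hypothetical):

    /* E.g. guard state manipulation that must not happen while the EMT
       is parked in ring-3 on behalf of a ring-0 call: */
    Assert(!VMMIsInRing3Call(pVCpu));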
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(r45378 → r45749)
         CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
 
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;  /** @todo r=bird: Why HM_CHANGED_GUEST_CR0?? */
         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     }
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r45734 → r45749)
      * This is why this is done after all possible exits-to-ring-3 paths in this code.
      */
+    /** @todo r=bird: You reverse the effect of calling PDMGetInterrupt by
+     *        handing it over to TPRM like we do in REMR3StateBack using
+     *        TRPMAssertTrap and the other setters. */
     rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
 …
     TMNotifyEndOfExecution(pVCpu);                /* Notify TM that the guest is no longer running. */
     Assert(!(ASMGetFlags() & X86_EFL_IF));
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
 
     /* Restore the effects of TPR patching if any. */
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
(r45531 → r45749)
     TMNotifyEndOfExecution(pVCpu);
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
     ASMSetFlags(uOldEFlags);
 …
     /* Just set the correct state here instead of trying to catch every goto above. */
-    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
 
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r45655 → r45749)
     TMNotifyEndOfExecution(pVCpu);
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     Assert(!(ASMGetFlags() & X86_EFL_IF));
 …
     /* Just set the correct state here instead of trying to catch every goto above. */
-    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
 
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(r45701 → r45749)
     if (RT_SUCCESS(rc))
     {
+        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+
         rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
         int rc2 = HMR0Leave(pVM, pVCpu);
         AssertRC(rc2);
+
+        VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     }
     STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
 …
         return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
     }
+
+
+    /**
+     * Checks whether we've armed the ring-0 long jump machinery.
+     *
+     * @returns @c true / @c false
+     * @param   pVCpu   The caller's cross context virtual CPU structure.
+     * @thread  EMT
+     * @sa      VMMIsLongJumpArmed
+     */
+    VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
+    {
+    #ifdef RT_ARCH_X86
+        return pVCpu->vmm.s.CallRing3JmpBufR0.eip
+            && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
+    #else
+        return pVCpu->vmm.s.CallRing3JmpBufR0.rip
+            && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
+    #endif
+    }
 
     /**
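VMMR0IsLongJumpArmed reports true while the EMT is executing inside the call set up by vmmR0CallRing3SetJmp (here: HMR0RunGuestCode and its callees) and no ring-3 call is currently in flight; the eip/rip member holds the return address recorded when the buffer was armed. A sketch of where the check is meaningful (the call site is hypothetical):

    /* Somewhere beneath HMR0RunGuestCode: verify we can bail out to ring-3
       through the long jump machinery before relying on it. */
    Assert(VMMR0IsLongJumpArmed(pVCpu));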
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp
(r45645 → r45749)
     if (pVM->cCpus > 1)
     {
-        /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
+        /* We might be holding locks here and could cause a deadlock since
+           VMR3PowerOff rendezvous with the other CPUs. */
         rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)VMR3PowerOff, 1, pVM->pUVM);
         AssertRC(rc);
         /* Set the VCPU state to stopped here as well to make sure no
-         * inconsistency with the EM state occurs.
-         */
+           inconsistency with the EM state occurs. */
         VMCPU_SET_STATE(VMMGetCpu(pVM), VMCPUSTATE_STOPPED);
         rc = VINF_EM_OFF;
trunk/src/VBox/VMM/VMMR3/VMEmt.cpp
(r44528 → r45749)
      * Do the halt.
      */
-    Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
+    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
     PUVM pUVM = pUVCpu->pUVM;