Timestamp: May 13, 2009 2:09:15 PM (16 years ago)
Location: trunk
Files: 18 edited
trunk/include/VBox/rem.h
(r18927 → r19660)
 REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu);
 REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu);
-REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM);
+REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst);
 REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM);
 REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM);
trunk/include/VBox/tm.h
(r19507 → r19660)
  */
 VMMDECL(uint64_t) TMVirtualGet(PVM pVM);
-VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers);
+VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM);
 VMMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM);
 VMMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM);
 VMMDECL(uint64_t) TMVirtualGetFreq(PVM pVM);
+VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM);
+VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM);
 VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers);
-VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM);
 VMMDECL(int) TMVirtualResume(PVM pVM);
 VMMDECL(int) TMVirtualPause(PVM pVM);
…
 VMMDECL(int) TMTimerStop(PTMTIMER pTimer);
 VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer);
-VMMDECL(uint64_t) TMTimerPoll(PVM pVM);
-VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta);
+VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu);
+VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta);

 /** @} */
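A note on the renames above: the new NoCheck getters cover the old Ex calls with fCheckTimers fixed to false, i.e. a clock read with no timer-FF side effects. A minimal migration sketch (the helper function is hypothetical; only the declarations above are assumed):

    #include <VBox/tm.h>

    /* Hypothetical helper: read both clocks without poking the timer FF.
       Pre-r19660 this would have been TMVirtualGetEx(pVM, false) and
       TMVirtualSyncGetEx(pVM, false). */
    static uint64_t sampleVirtualSyncLag(PVM pVM)
    {
        uint64_t u64Virtual     = TMVirtualGetNoCheck(pVM);
        uint64_t u64VirtualSync = TMVirtualSyncGetNoCheck(pVM);
        return u64Virtual - u64VirtualSync; /* rough lag; TMVirtualSyncGetLag is the real query */
    }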
trunk/include/VBox/vm.h
(r19593 → r19660)
  * @{
  */
-/** This action forces the VM to schedule and run pending timer (TM). */
-#define VM_FF_TIMER                         RT_BIT_32(2)
+/** The virtual sync clock has been stopped, go to TM until it has been
+ *  restarted... */
+#define VM_FF_TM_VIRTUAL_SYNC               RT_BIT_32(2)
 /** PDM Queues are pending. */
 #define VM_FF_PDM_QUEUES_BIT                3
…
 /** This action forces the VM to service check and pending interrupts on the PIC. */
 #define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
-/** This action forces the VM to schedule and run pending timer (TM). (bogus for now; needed for PATM backwards compatibility) */
+/** This action forces the VM to schedule and run pending timer (TM).
+ * @remarks Don't move - PATM compatibility. */
 #define VMCPU_FF_TIMER                      RT_BIT_32(2)
 /** PDM critical section unlocking is pending, process promptly upon return to R3. */
…

 /** Externally forced VM actions. Used to quit the idle/wait loop. */
-#define VM_FF_EXTERNAL_HALTED_MASK          (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
+#define VM_FF_EXTERNAL_HALTED_MASK          (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)
 /** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
-#define VMCPU_FF_EXTERNAL_HALTED_MASK       (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST)
+#define VMCPU_FF_EXTERNAL_HALTED_MASK       (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_TIMER)

 /** High priority VM pre-execution actions. */
-#define VM_FF_HIGH_PRIORITY_PRE_MASK        (   VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TIMER | VM_FF_DEBUG_SUSPEND \
-                                             |  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
+#define VM_FF_HIGH_PRIORITY_PRE_MASK        (   VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC | VM_FF_DEBUG_SUSPEND \
+                                             |  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
 /** High priority VMCPU pre-execution actions. */
-#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK     (   VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
-                                             |  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT)
+#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK     (   VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 \
+                                             |  VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
+                                             |  VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

 /** High priority VM pre raw-mode execution mask. */
 #define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK    (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
 /** High priority VMCPU pre raw-mode execution mask. */
-#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK (   VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT \
-                                             |  VMCPU_FF_INHIBIT_INTERRUPTS)
+#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK (   VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
+                                             |  VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

 /** High priority post-execution actions. */
…
 /** VM Flags that cause the HWACCM loops to go back to ring-3. */
-#define VM_FF_HWACCM_TO_R3_MASK             (VM_FF_TIMER | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
+#define VM_FF_HWACCM_TO_R3_MASK             (VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
 /** VMCPU Flags that cause the HWACCM loops to go back to ring-3. */
-#define VMCPU_FF_HWACCM_TO_R3_MASK          (VMCPU_FF_TO_R3)
+#define VMCPU_FF_HWACCM_TO_R3_MASK          (VMCPU_FF_TO_R3 | VMCPU_FF_TIMER)

 /** All the forced VM flags. */
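The masks above exist to be tested in bulk. A sketch of the check pattern they serve (hypothetical helper; VM_FF_ISPENDING and VMCPU_FF_ISPENDING are the real macros used throughout this changeset), showing why a halted-EMT test now needs both the VM and the VMCPU mask after the timer bit moved to the per-VCPU set:

    #include <VBox/vm.h>

    /* Hypothetical helper modelled on the halt loops in VMEmt.cpp. */
    static bool shouldLeaveHaltedState(PVM pVM, PVMCPU pVCpu)
    {
        /* VMCPU_FF_TIMER is in VMCPU_FF_EXTERNAL_HALTED_MASK after r19660,
           replacing the old global VM_FF_TIMER test. */
        return VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_HALTED_MASK);
    }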
trunk/src/VBox/VMM/EM.cpp
(r19658 → r19660)
  * important FFs while we were busy switching the state. So, check again.
  */
-    if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
-        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
+    if (    VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
+        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
     {
         LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
…
  */
 #ifdef VBOX_HIGH_RES_TIMERS_HACK
-    TMTimerPoll(pVM);
+    TMTimerPoll(pVM, pVCpu);
 #endif
+    AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
     if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
…
  */
 #ifdef VBOX_HIGH_RES_TIMERS_HACK
-    TMTimerPoll(pVM);
+    TMTimerPoll(pVM, pVCpu);
 #endif
     STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
…
  */
 #ifdef VBOX_HIGH_RES_TIMERS_HACK
-    TMTimerPoll(pVM);
+    TMTimerPoll(pVM, pVCpu);
 #endif
     if (    VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)
…

     /*
      * We stay in the wait for SIPI state unless explicitly told otherwise.
      */
     if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
…
     /* check that we got them all */
-    Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY)));
-    Assert(!(VMCPU_FF_NORMAL_PRIORITY_POST_MASK & ~(VMCPU_FF_CSAM_SCAN_PAGE)));
+    AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY));
+    AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
 }
…
     /* check that we got them all */
-    Assert(!(VM_FF_NORMAL_PRIORITY_MASK & ~(VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)));
+    AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY));
 }
…
  * Timers before interrupts.
  */
-    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_TIMER, VM_FF_PGM_NO_MEMORY))
+    if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
+        && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
         TMR3TimerQueuesDo(pVM);
…
  * The instruction following an emulated STI should *always* be executed!
  */
-    if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
-        &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+        && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
     {
         Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
…
 #endif
+    if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
+    {
+        /** @todo FIXME */
+    }
+
     /* check that we got them all */
-    Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)));
-    Assert(!(VMCPU_FF_HIGH_PRIORITY_PRE_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)));
+    AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY));
+    AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
 }
…
             break;

         /*
          * Switch to the wait for SIPI state (application processor only)
          */
trunk/src/VBox/VMM/PATM/PATM.cpp
(r19293 → r19660)

     /* These values can't change as they are hardcoded in patch code (old saved states!) */
-    AssertCompile(VM_FF_TIMER == VMCPU_FF_TIMER);
+    AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
     AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
     AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
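The AssertCompile above pins VMCPU_FF_TIMER to bit 2 because generated patch code and old saved states hardcode that bit. For readers unfamiliar with the idiom, a compile-time assertion can be built from a negative array size; a simplified stand-in (the real macro lives in IPRT's iprt/assert.h):

    /* Simplified stand-in for AssertCompile: a negative array size is a
       compile error, so the build breaks as soon as the condition fails. */
    #define MY_ASSERT_COMPILE(expr) typedef char my_assert_compile[(expr) ? 1 : -1]

    MY_ASSERT_COMPILE((1U << 2) == 4);       /* fine: array of size 1      */
    /* MY_ASSERT_COMPILE((1U << 2) == 8); */ /* would refuse to compile    */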
trunk/src/VBox/VMM/REMInternal.h
(r19297 → r19660)
 /** In REM mode.
  * I.e. the correct CPU state and some other bits are with REM. */
-bool                    fInREM;
+bool volatile           fInREM;
 /** In REMR3State. */
 bool                    fInStateSync;
…

 #else  /* !VBOX_WITH_STATISTICS */
-# define remR3ProfileStart(c)
-# define remR3ProfileStop(c)
+# define remR3ProfileStart(c)   do { } while (0)
+# define remR3ProfileStop(c)    do { } while (0)
 #endif /* !VBOX_WITH_STATISTICS */
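The do { } while (0) bodies given to the disabled profiling macros are the standard way to make a no-op macro behave like a single statement. A truly empty expansion leaves a bare semicolon behind in guarded calls, which some compilers warn about (e.g. "empty controlled statement") and which reads like a bug; a minimal demonstration with hypothetical names:

    #define PROFILE_START_EMPTY(c)                   /* expands to nothing      */
    #define PROFILE_START_NOP(c)   do { } while (0)  /* a real no-op statement  */

    static void example(int fEnabled)
    {
        if (fEnabled)
            PROFILE_START_EMPTY(0);   /* leaves just ';' as the if body */
        if (fEnabled)
            PROFILE_START_NOP(0);     /* a proper single statement      */
    }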
trunk/src/VBox/VMM/TM.cpp
(r19609 → r19660)

     pVM->tm.s.offVM = RT_OFFSETOF(VM, tm.s);
+    pVM->tm.s.idTimerCpu = pVM->cCPUs - 1; /* The last CPU. */
     pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].enmClock = TMCLOCK_VIRTUAL;
     pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].u64Expire = INT64_MAX;
…
     pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].enmClock = TMCLOCK_TSC;
     pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].u64Expire = INT64_MAX;
+

     /*
…
     STAM_REG(pVM, &pVM->tm.s.StatDoQueuesRun,        STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Run",        STAMUNIT_TICKS_PER_CALL, "The run part.");

-    STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet,     STAMTYPE_COUNTER, "/TM/PollAlreadySet",          STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
-    STAM_REG(pVM, &pVM->tm.s.StatPollVirtual,        STAMTYPE_COUNTER, "/TM/PollHitsVirtual",         STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
-    STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync,    STAMTYPE_COUNTER, "/TM/PollHitsVirtualSync",     STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
-    STAM_REG(pVM, &pVM->tm.s.StatPollMiss,           STAMTYPE_COUNTER, "/TM/PollMiss",                STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
+    STAM_REG(pVM, &pVM->tm.s.StatPoll,               STAMTYPE_COUNTER, "/TM/Poll",                    STAMUNIT_OCCURENCES, "TMTimerPoll calls.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet,     STAMTYPE_COUNTER, "/TM/Poll/AlreadySet",         STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollVirtual,        STAMTYPE_COUNTER, "/TM/Poll/HitsVirtual",        STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync,    STAMTYPE_COUNTER, "/TM/Poll/HitsVirtualSync",    STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollMiss,           STAMTYPE_COUNTER, "/TM/Poll/Miss",               STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollRunning,        STAMTYPE_COUNTER, "/TM/Poll/Running",            STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");
+
+    STAM_REG(pVM, &pVM->tm.s.StatPollGIP,            STAMTYPE_COUNTER, "/TM/PollGIP",                 STAMUNIT_OCCURENCES, "TMTimerPollGIP calls.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollGIPAlreadySet,  STAMTYPE_COUNTER, "/TM/PollGIP/AlreadySet",      STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where the FF was already set.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollGIPVirtual,     STAMTYPE_COUNTER, "/TM/PollGIP/HitsVirtual",     STAMUNIT_OCCURENCES, "The number of times TMTimerPollGIP found an expired TMCLOCK_VIRTUAL queue.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollGIPVirtualSync, STAMTYPE_COUNTER, "/TM/PollGIP/HitsVirtualSync", STAMUNIT_OCCURENCES, "The number of times TMTimerPollGIP found an expired TMCLOCK_VIRTUAL_SYNC queue.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollGIPMiss,        STAMTYPE_COUNTER, "/TM/PollGIP/Miss",            STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where nothing had expired.");
+    STAM_REG(pVM, &pVM->tm.s.StatPollGIPRunning,     STAMTYPE_COUNTER, "/TM/PollGIP/Running",         STAMUNIT_OCCURENCES, "TMTimerPollGIP calls where the queues were being run.");

     STAM_REG(pVM, &pVM->tm.s.StatPostponedR3,        STAMTYPE_COUNTER, "/TM/PostponedR3",             STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-3.");
…
     if (pVM->tm.s.fVirtualSyncCatchUp)
     {
-        const uint64_t offVirtualNow     = TMVirtualGetEx(pVM, false /* don't check timers */);
-        const uint64_t offVirtualSyncNow = TMVirtualSyncGetEx(pVM, false /* don't check timers */);
+        const uint64_t offVirtualNow     = TMVirtualGetNoCheck(pVM);
+        const uint64_t offVirtualSyncNow = TMVirtualSyncGetNoCheck(pVM);
         if (pVM->tm.s.fVirtualSyncCatchUp)
         {
…
 #endif

-    VM_FF_CLEAR(pVM, VM_FF_TIMER);
+    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /** @todo FIXME: this isn't right. */
     tmUnlock(pVM);
 }
…
      * Make sure timers get rescheduled immediately.
      */
-    VM_FF_SET(pVM, VM_FF_TIMER);
+    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);

     return VINF_SUCCESS;
…
      * TMVirtualSyncGet only permits EMT.
      */
-    uint64_t u64Now = TMVirtualGet(pVM);
+    uint64_t u64Now = TMVirtualGetNoCheck(pVM);
     if (pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64Now)
         return true;
…
 static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t /*iTick*/)
 {
-    PVM pVM = (PVM)pvUser;
+    PVM     pVM      = (PVM)pvUser;
+    PVMCPU  pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+
+    RTLogFlush(NULL);
+
     AssertCompile(TMCLOCK_MAX == 4);
 #ifdef DEBUG_Sander /* very annoying, keep it private. */
-    if (VM_FF_ISSET(pVM, VM_FF_TIMER))
+    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
         Log(("tmR3TimerCallback: timer event still pending!!\n"));
 #endif
-    if (    !VM_FF_ISSET(pVM, VM_FF_TIMER)
-        &&  (   pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule
+    if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+        &&  (   pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule /** @todo FIXME - reconsider offSchedule as a reason for running the timer queues. */
             ||  pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
             ||  pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
…
             ||  tmR3AnyExpiredTimers(pVM)
             )
-        &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
+        &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
         &&  !pVM->tm.s.fRunningQueues
        )
     {
-        VM_FF_SET(pVM, VM_FF_TIMER);
-        REMR3NotifyTimerPending(pVM);
-        VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+        Log5(("TM(%u): FF: 0 -> 1\n", __LINE__));
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo | VMNOTIFYFF_FLAGS_POKE ?*/);
         STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
     }
…
 VMMR3DECL(void) TMR3TimerQueuesDo(PVM pVM)
 {
-    /** Note: temporarily restrict this to VCPU 0. */
-    if (VMMGetCpuId(pVM) != 0)
-        return;
-
-    /*
-     * Only one EMT should be doing this at a time.
-     */
-    VM_FF_CLEAR(pVM, VM_FF_TIMER);
-    if (ASMBitTestAndSet(&pVM->tm.s.fRunningQueues, 0))
+    /*
+     * Only the dedicated timer EMT should do stuff here.
+     *
+     * The lock isn't really necessary any longer, but it might come
+     * in handy when dealing with VM_FF_TM_VIRTUAL_SYNC later.
+     */
+    Assert(pVM->tm.s.idTimerCpu < pVM->cCPUs);
+    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    if (    VMMGetCpu(pVM) != pVCpuDst
+        ||  ASMBitTestAndSet(&pVM->tm.s.fRunningQueues, 0))
     {
         Assert(pVM->cCPUs > 1);
…
     Log2(("TMR3TimerQueuesDo:\n"));
     tmLock(pVM);
+
+    /*
+     * Clear the FF before processing the queues but after obtaining the lock.
+     */
+    VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER);

     /*
…
     tmR3TimerQueueRunVirtualSync(pVM);
     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
+    if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */
+        VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

     /* TMCLOCK_VIRTUAL */
…
     STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r2);

-#if 0 /** @todo if ever used, remove this and fix the stam prefixes on TMCLOCK_REAL below. */
     /* TMCLOCK_TSC */
-    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
-    tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
-    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s3);
-    STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
-    tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
-    STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r3);
-#endif
+    Assert(!pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offActive); /* not used */

     /* TMCLOCK_REAL */
…
     /* done */
+    tmUnlock(pVM);
     ASMAtomicBitClear(&pVM->tm.s.fRunningQueues, 0);
-    tmUnlock(pVM);
 }
+
+//__BEGIN_DECLS
+//int iomLock(PVM pVM);
+//void iomUnlock(PVM pVM);
+//__END_DECLS

…
     /* fire */
+//    tmUnlock(pVM);
     switch (pTimer->enmType)
     {
-        case TMTIMERTYPE_DEV:       pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
+        case TMTIMERTYPE_DEV:
+//            iomLock(pVM);
+            pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer);
+//            iomUnlock(pVM);
+            break;
+
         case TMTIMERTYPE_DRV:       pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
         case TMTIMERTYPE_INTERNAL:  pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
…
             break;
     }
+//    tmLock(pVM);

     /* change the state if it wasn't changed already in the handler. */
…
      * and 2) lag behind at a steady rate.
      */
-    const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
+    const uint64_t u64VirtualNow = TMVirtualGetNoCheck(pVM);
     uint64_t u64Now;
     if (!pVM->tm.s.fVirtualSyncTicking)
…
     /* calc the slack we've handed out. */
-    const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */);
+    const uint64_t u64VirtualNow2 = TMVirtualGetNoCheck(pVM);
     Assert(u64VirtualNow2 >= u64VirtualNow);
     AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
trunk/src/VBox/VMM/TMInternal.h
(r19537 → r19660)
  * Config variable: TSCNotTiedToHalt (bool) */
 bool                        fTSCNotTiedToHalt;
-bool                        afAlignment0[6]; /**< alignment padding */
+bool                        afAlignment0[2]; /**< alignment padding */
+/** The ID of the virtual CPU that normally runs the timers. */
+VMCPUID                     idTimerCpu;
 /** The number of CPU clock ticks per second (TMCLOCK_TSC).
  * Config variable: TSCTicksPerSecond (64-bit unsigned int)
…
 /** TMTimerPoll
  * @{ */
+STAMCOUNTER                 StatPoll;
 STAMCOUNTER                 StatPollAlreadySet;
 STAMCOUNTER                 StatPollVirtual;
 STAMCOUNTER                 StatPollVirtualSync;
 STAMCOUNTER                 StatPollMiss;
+STAMCOUNTER                 StatPollRunning;
+/** @} */
+/** TMTimerPollGIP
+ * @{ */
+STAMCOUNTER                 StatPollGIP;
+STAMCOUNTER                 StatPollGIPAlreadySet;
+STAMCOUNTER                 StatPollGIPVirtual;
+STAMCOUNTER                 StatPollGIPVirtualSync;
+STAMCOUNTER                 StatPollGIPMiss;
+STAMCOUNTER                 StatPollGIPRunning;
 /** @} */
 /** TMTimerSet
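The padding shrink above is deliberate: dropping afAlignment0 from 6 to 2 bytes exactly absorbs the new 4-byte VMCPUID, so the offsets of the fields that follow are unchanged. A sketch of the layout argument with a compile-time size check (hypothetical mirror structs, not the real TM data):

    #include <stdint.h>

    typedef uint32_t VMCPUID_X;  /* stand-in for VMCPUID (4 bytes) */

    struct layout_before { uint8_t afBools[2]; uint8_t afPad[6];               uint64_t cTicksPerSec; };
    struct layout_after  { uint8_t afBools[2]; uint8_t afPad[2]; VMCPUID_X id; uint64_t cTicksPerSec; };

    /* Compile-time proof that the overall size (and the 64-bit field's offset)
       did not move: */
    typedef char layout_unchanged[(sizeof(struct layout_before) == sizeof(struct layout_after)) ? 1 : -1];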
trunk/src/VBox/VMM/VMEmt.cpp
(r19539 → r19660)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
             break;
-        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
+        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
…
          * Estimate time left to the next event.
          */
-        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
+        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
…
          */
         uint64_t u64Delta;
-        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
+        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
trunk/src/VBox/VMM/VMM.cpp
(r19621 → r19660)
     }
     /* Wait until all other VCPUs are waiting for us. */
-    while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (pVM->cCPUs - 1))
+    while (RTCritSectGetWaiters(&pVM->vmm.s.CritSectSync) != (int32_t)(pVM->cCPUs - 1))
         RTThreadSleep(1);
…
     c = 0;
     f = fGlobalForcedActions;
-    PRINT_FLAG(VM_FF_,TIMER);
+    PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
     PRINT_FLAG(VM_FF_,PDM_QUEUES);
     PRINT_FLAG(VM_FF_,PDM_DMA);
…
     {
         const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
-        pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, f);
+        pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, fLocalForcedActions);

         /* show the flag mnemonics */
…
         PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
         PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
+        PRINT_FLAG(VMCPU_FF_,TIMER);
         PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
         PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
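The (int32_t) cast in the wait loop is a signed/unsigned fix: RTCritSectGetWaiters returns a signed count (it can report -1 when the section is idle, per the IPRT lock-count convention — an assumption stated here, not shown in the diff), while pVM->cCPUs is unsigned, so the bare comparison promoted the signed side to unsigned. A minimal demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t  cWaiters = -1;    /* e.g. nobody waiting yet */
        uint32_t cCpus    = 1;     /* cCpus - 1 == 0          */

        /* Without the cast, cWaiters is converted to unsigned:          */
        printf("unsigned view of -1: %u\n", (uint32_t)cWaiters);  /* 4294967295 */

        /* With the cast the comparison stays signed and behaves sanely: */
        printf("equal? %d\n", cWaiters == (int32_t)(cCpus - 1));  /* 0 (false)  */
        return 0;
    }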
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
(r19538 → r19660)
         tmUnlock(pVM);
     }
-    else if (!VM_FF_ISSET(pVM, VM_FF_TIMER)) /**@todo only do this when arming the timer. */
-    {
-        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
-        VM_FF_SET(pVM, VM_FF_TIMER);
+    else
+    {
+        /** @todo FIXME: don't use FF for scheduling! */
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) /**@todo only do this when arming the timer. */
+        {
+            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-        VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
 #endif
+            STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
+        }
     }
 }
…
  * This function is called before FFs are checked in the inner execution EM loops.
  *
- * @returns Virtual timer ticks to the next event.
+ * @returns Virtual timer ticks to the next event. (I.e. 0 means that a timer
+ *          has expired or some important rescheduling is pending.)
  * @param   pVM         Pointer to the shared VM structure.
+ * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
  * @thread  The emulation thread.
  */
-VMMDECL(uint64_t) TMTimerPoll(PVM pVM)
-{
-    int rc = tmLock(pVM); /* play safe for now */
+VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
+{
+    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
+    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

     /*
-     * Return straight away if the timer FF is already set.
+     * Return straight away if the timer FF is already set ...
      */
-    if (VM_FF_ISSET(pVM, VM_FF_TIMER))
+    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     {
         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
-#ifndef IN_RING3
-        if (RT_SUCCESS(rc))
-#endif
-            tmUnlock(pVM);
-        return 0;
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+    }
+
+    /*
+     * ... or if timers are being run.
+     */
+    if (pVM->tm.s.fRunningQueues)
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
+        return s_u64OtherRet;
     }

     /*
      * Get current time and check the expire times of the two relevant queues.
      */
-    const uint64_t u64Now = TMVirtualGet(pVM);
+    int rc = tmLock(pVM); /** @todo FIXME: Stop playing safe here... */
+    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);

     /*
      * TMCLOCK_VIRTUAL
      */
     const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
     const int64_t  i64Delta1  = u64Expire1 - u64Now;
     if (i64Delta1 <= 0)
     {
…
 #endif
         tmUnlock(pVM);
-        VM_FF_SET(pVM, VM_FF_TIMER);
+        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        return 0;
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     }
…
     if (i64Delta2 <= 0)
     {
+        if (    !pVM->tm.s.fRunningQueues
+            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+        {
+            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        }
         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
 #ifndef IN_RING3
…
         tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
-        VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        return 0;
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     }
     if (pVM->tm.s.fVirtualSyncCatchUp)
…
  *          0 if the next event has already expired.
  * @param   pVM         Pointer to the shared VM structure.
- * @param   pVM         Pointer to the shared VM structure.
+ * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
  * @param   pu64Delta   Where to store the delta.
  * @thread  The emulation thread.
  */
-VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta)
-{
-    int rc = tmLock(pVM); /* play safe for now. */
+VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
+{
+    static const uint64_t s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
+    PVMCPU         pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
+    STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);

     /*
-     * Return straight away if the timer FF is already set.
+     * Return straight away if the timer FF is already set ...
      */
-    if (VM_FF_ISSET(pVM, VM_FF_TIMER))
-    {
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
-#ifndef IN_RING3
-        if (RT_SUCCESS(rc))
-#endif
-            tmUnlock(pVM);
-        *pu64Delta = 0;
-        return 0;
+    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
+        if (pVCpuDst == pVCpu)
+        {
+            *pu64Delta = 0;
+            return 0;
+        }
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     }

     /*
-     * Get current time and check the expire times of the two relevant queues.
-     */
-    const uint64_t u64Now = TMVirtualGet(pVM);
+     * ... or if timers are being run.
+     */
+    if (pVM->tm.s.fRunningQueues)
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
+    }
+
+    int rc = tmLock(pVM); /** @todo FIXME: Stop playing safe... */

     /*
-     * TMCLOCK_VIRTUAL
+     * Check for TMCLOCK_VIRTUAL expiration.
      */
     const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
…
     if (i64Delta1 <= 0)
     {
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
+        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
 #ifndef IN_RING3
         if (RT_SUCCESS(rc))
 #endif
             tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
-        VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        *pu64Delta = 0;
-        return 0;
+        if (pVCpuDst == pVCpu)
+        {
+            *pu64Delta = 0;
+            return 0;
+        }
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     }

     /*
-     * TMCLOCK_VIRTUAL_SYNC
+     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
      * This isn't quite as straightforward if in a catch-up, not only do
      * we have to adjust the 'now' but we have to adjust the delta as well.
…
     if (i64Delta2 <= 0)
     {
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
+        {
+            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
+#ifdef IN_RING3
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        }
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
+
 #ifndef IN_RING3
         if (RT_SUCCESS(rc))
 #endif
             tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
-        VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        *pu64Delta = 0;
-        return 0;
+        if (pVCpuDst == pVCpu)
+        {
+            *pu64Delta = 0;
+            return 0;
+        }
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     }
     if (pVM->tm.s.fVirtualSyncCatchUp)
         i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);

-    /*
-     * Return the GIP time of the next event.
-     * This is the reverse of what tmVirtualGetRaw is doing.
-     */
-    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
-    uint64_t u64GipTime = RT_MIN(i64Delta1, i64Delta2);
-    *pu64Delta = u64GipTime;
-    u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
-    if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
-    {
-        u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
-        u64GipTime *= 100;
-        u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
-        u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
-    }
+    uint64_t u64GipTime;
+    if (pVCpuDst == pVCpu)
+    {
+        /*
+         * Return the GIP time of the next event.
+         * This is the reverse of what tmVirtualGetRaw is doing.
+         */
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
+        u64GipTime = RT_MIN(i64Delta1, i64Delta2);
+        *pu64Delta = u64GipTime;
+        u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
+        if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
+        {
+            u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
+            u64GipTime *= 100;
+            u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
+            u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
+        }
+    }
+    else
+    {
+        *pu64Delta = s_u64OtherRet;
+        u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
+    }
 #ifndef IN_RING3
     if (RT_SUCCESS(rc))
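The 500000000 constant (s_u64OtherRet) gives every non-timer EMT a long, fixed poll result so that only the dedicated timer VCPU ever gets a precise (possibly zero) delta. A sketch of how a halt loop consumes this, modelled on the VMEmt.cpp call sites (the helper function and its sleep policy are hypothetical; TMVirtualToNano and RTThreadSleep are real VMM/IPRT calls):

    #include <VBox/tm.h>
    #include <iprt/thread.h>

    /* Hypothetical halt-loop step. */
    static void haltLoopStep(PVM pVM, PVMCPU pVCpu)
    {
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
        if (!u64NanoTS)
            return;                 /* 0: timer work is pending right now */
        /* A non-timer EMT typically sees the fixed ~500 ms value here and
           just blocks; the timer EMT sees the real distance to the next
           expiry and can sleep precisely. */
        RTThreadSleep((RTMSINTERVAL)(u64NanoTS / 1000000));
    }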
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
(r19500 → r19660)
      * Use the chance to check for expired timers.
      */
-    if (    fCheckTimers
-        &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
-        &&  !pVM->tm.s.fRunningQueues
-        &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
-             || (   pVM->tm.s.fVirtualSyncTicking
-                 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
-                )
-            )
-       )
-    {
-        VM_FF_SET(pVM, VM_FF_TIMER);
-        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
+    if (fCheckTimers)
+    {
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+            &&  !pVM->tm.s.fRunningQueues
+            &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
+                 || (   pVM->tm.s.fVirtualSyncTicking
+                     && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
+                    )
+                )
+            &&  !pVM->tm.s.fRunningQueues
+           )
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
+            Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-        VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
 #endif
+        }
     }
 }
…

 /**
- * Gets the current TMCLOCK_VIRTUAL time
+ * Gets the current TMCLOCK_VIRTUAL time without checking
+ * timers or anything.
+ *
+ * Meaning, this has no side effect on FFs like TMVirtualGet may have.
  *
  * @returns The timestamp.
- * @param   pVM             VM handle.
- * @param   fCheckTimers    Check timers or not
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
-{
-    return tmVirtualGet(pVM, fCheckTimers);
+ * @param   pVM     VM handle.
+ *
+ * @remarks See TMVirtualGet.
+ */
+VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
+{
+    return tmVirtualGet(pVM, false /*fCheckTimers*/);
 }
…
  * @thread  EMT.
  */
-VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
-{
-    uint64_t u64;
+DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
+{
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
+    uint64_t u64;
+
     if (pVM->tm.s.fVirtualSyncTicking)
     {
-        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];

         /*
…
         Assert(pVM->tm.s.cVirtualTicking);
         u64 = tmVirtualGetRaw(pVM);
-        if (    fCheckTimers
-            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
-            &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
+        if (fCheckTimers)
         {
-            VM_FF_SET(pVM, VM_FF_TIMER);
+            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+                &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
+            {
+                Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
+                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-            REMR3NotifyTimerPending(pVM);
-            VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
+                REMR3NotifyTimerPending(pVM, pVCpuDst);
+                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
+#endif
+                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
+            }
         }
…
         {
             u64 = u64Expire;
-            int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
+            int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
             if (RT_SUCCESS(rc))
             {
                 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
                 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
+                VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
                 tmUnlock(pVM);
             }
             if (    fCheckTimers
-                &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
+                &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
             {
-                VM_FF_SET(pVM, VM_FF_TIMER);
+                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-                REMR3NotifyTimerPending(pVM);
-                VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+                REMR3NotifyTimerPending(pVM, pVCpuDst);
+                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
 #endif
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
…
         u64 = pVM->tm.s.u64VirtualSync;

-        /*
-         * If it looks like a halt caused by pending timers, make sure the FF is raised.
-         * This is a safeguard against timer queue runner leaving the virtual sync clock stopped.
-         */
-        if (    fCheckTimers
-            &&  pVM->tm.s.cVirtualTicking
-            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
-        {
-            const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
-            if (u64 >= u64Expire)
-            {
-                VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-                REMR3NotifyTimerPending(pVM);
-                VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
-                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
-                Log4(("TM: %RU64/%RU64: exp tmr=>ff (!)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
-            }
-        }
     }
+
     return u64;
 }
…
  * @param   pVM     VM handle.
  * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
  */
 VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
 {
-    return TMVirtualSyncGetEx(pVM, true /* check timers */);
+    return tmVirtualSyncGetEx(pVM, true /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
+ * TMCLOCK_VIRTUAL.
+ *
+ * @returns The timestamp.
+ * @param   pVM     VM handle.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
+{
+    return tmVirtualSyncGetEx(pVM, false /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL_SYNC time.
+ *
+ * @returns The timestamp.
+ * @param   pVM             VM handle.
+ * @param   fCheckTimers    Check timers on the virtual clock or not.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
+{
+    return tmVirtualSyncGetEx(pVM, fCheckTimers);
 }
trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp
(r19288 → r19660)
     if (!(++s_iTimerPoll & 0xf))
     {
-        uint64_t cTicks = TMTimerPoll(pVM); NOREF(cTicks);
-        Log2(("TMTimerPoll at %08RX32 returned %RX64 (VM_FF_TIMER=%d)\n", pRegFrame->eip, cTicks, VM_FF_ISPENDING(pVM, VM_FF_TIMER)));
+        uint64_t cTicks = TMTimerPoll(pVM, pVCpu); NOREF(cTicks);
+        Log2(("TMTimerPoll at %08RX32 returned %RX64 (VM_FF_TM_VIRTUAL_SYNC=%d VMCPU_FF_TIMER=%d)\n", pRegFrame->eip, cTicks,
+              VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC), VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)));
     }
 }
…
  */
     if (    rc == VINF_SUCCESS
-        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TIMER | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY)
-            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
+        &&  (   VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC | VM_FF_REQUEST | VM_FF_PGM_NO_MEMORY)
+            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_TO_R3 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
             )
        )
…
     }
     /* Pending timer action. */
-    else if (VM_FF_ISPENDING(pVM, VM_FF_TIMER))
+    else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
         rc = VINF_EM_RAW_TIMER_PENDING;
+    /* The Virtual Sync clock has stopped. */
+    else if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
+        rc = VINF_EM_RAW_TO_R3;
     /* Pending interrupt: dispatch it. */
     else if (    VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
(r19657 → r19660)
 }

+
 /**
  * Wakes up the halted EMT thread so it can service a pending request.
trunk/src/VBox/VMM/VMMTests.cpp
(r19463 → r19660)

         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
-        VM_FF_CLEAR(pVM, VM_FF_TIMER);
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
+        VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
         VM_FF_CLEAR(pVM, VM_FF_REQUEST);

…
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
-        VM_FF_CLEAR(pVM, VM_FF_TIMER);
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
+        VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);

         uint64_t TickThisStart = ASMReadTSC();
trunk/src/recompiler/VBoxREMWrapper.cpp
(r19639 → r19660)
 static DECLCALLBACKPTR(void, pfnREMR3NotifyInterruptSet)(PVM, PVMCPU);
 static DECLCALLBACKPTR(void, pfnREMR3NotifyInterruptClear)(PVM, PVMCPU);
-static DECLCALLBACKPTR(void, pfnREMR3NotifyTimerPending)(PVM);
+static DECLCALLBACKPTR(void, pfnREMR3NotifyTimerPending)(PVM, PVMCPU);
 static DECLCALLBACKPTR(void, pfnREMR3NotifyDmaPending)(PVM);
 static DECLCALLBACKPTR(void, pfnREMR3NotifyQueuePending)(PVM);
…
     { "REMR3NotifyInterruptSet",   (void *)&pfnREMR3NotifyInterruptSet,   &g_aArgsVMandVMCPU[0], RT_ELEMENTS(g_aArgsVMandVMCPU), REMFNDESC_FLAGS_RET_VOID, 0, NULL },
     { "REMR3NotifyInterruptClear", (void *)&pfnREMR3NotifyInterruptClear, &g_aArgsVMandVMCPU[0], RT_ELEMENTS(g_aArgsVMandVMCPU), REMFNDESC_FLAGS_RET_VOID, 0, NULL },
-    { "REMR3NotifyTimerPending",   (void *)&pfnREMR3NotifyTimerPending,   &g_aArgsVM[0],         RT_ELEMENTS(g_aArgsVM),         REMFNDESC_FLAGS_RET_VOID, 0, NULL },
+    { "REMR3NotifyTimerPending",   (void *)&pfnREMR3NotifyTimerPending,   &g_aArgsVMandVMCPU[0], RT_ELEMENTS(g_aArgsVMandVMCPU), REMFNDESC_FLAGS_RET_VOID, 0, NULL },
     { "REMR3NotifyDmaPending",     (void *)&pfnREMR3NotifyDmaPending,     &g_aArgsVM[0],         RT_ELEMENTS(g_aArgsVM),         REMFNDESC_FLAGS_RET_VOID, 0, NULL },
     { "REMR3NotifyQueuePending",   (void *)&pfnREMR3NotifyQueuePending,   &g_aArgsVM[0],         RT_ELEMENTS(g_aArgsVM),         REMFNDESC_FLAGS_RET_VOID, 0, NULL },
…
     { "TMNotifyEndOfExecution",   (void *)(uintptr_t)&TMNotifyEndOfExecution,   &g_aArgsVMCPU[0],      RT_ELEMENTS(g_aArgsVMCPU),      REMFNDESC_FLAGS_RET_VOID, 0,                NULL },
     { "TMNotifyStartOfExecution", (void *)(uintptr_t)&TMNotifyStartOfExecution, &g_aArgsVMCPU[0],      RT_ELEMENTS(g_aArgsVMCPU),      REMFNDESC_FLAGS_RET_VOID, 0,                NULL },
-    { "TMTimerPoll",              (void *)(uintptr_t)&TMTimerPoll,              &g_aArgsVM[0],         RT_ELEMENTS(g_aArgsVM),         REMFNDESC_FLAGS_RET_INT,  sizeof(uint64_t), NULL },
+    { "TMTimerPoll",              (void *)(uintptr_t)&TMTimerPoll,              &g_aArgsVMandVMCPU[0], RT_ELEMENTS(g_aArgsVMandVMCPU), REMFNDESC_FLAGS_RET_INT,  sizeof(uint64_t), NULL },
     { "TMR3TimerQueuesDo",        (void *)(uintptr_t)&TMR3TimerQueuesDo,        &g_aArgsVM[0],         RT_ELEMENTS(g_aArgsVM),         REMFNDESC_FLAGS_RET_VOID, 0,                NULL },
     { "TMVirtualPause",           (void *)(uintptr_t)&TMVirtualPause,           &g_aArgsVM[0],         RT_ELEMENTS(g_aArgsVM),         REMFNDESC_FLAGS_RET_INT,  sizeof(int),      NULL },
…
 }

-REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
+REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
 {
 #ifndef USE_REM_STUBS
     Assert(VALID_PTR(pfnREMR3NotifyTimerPending));
-    pfnREMR3NotifyTimerPending(pVM);
+    pfnREMR3NotifyTimerPending(pVM, pVCpuDst);
 #endif
 }
trunk/src/recompiler/VBoxRecompiler.c
(r19639 → r19660)
 {
     LogFlow(("remR3TimersRun:\n"));
+    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
     remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
     remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
…
  *
  * @param   pVM             VM Handle.
+ * @param   pVCpuDst        The target CPU for this notification.
+ *                          TM will not broadcast pending timer events, but use
+ *                          a dedicated EMT for them. So, only interrupt REM
+ *                          execution if the given CPU is executing in REM.
  * @thread  Any.
  */
-REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
+REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
 {
 #ifndef DEBUG_bird
…
     if (pVM->rem.s.fInREM)
     {
-        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
-                       CPU_INTERRUPT_EXTERNAL_TIMER);
-    }
+        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
+        {
+            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
+            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
+                           CPU_INTERRUPT_EXTERNAL_TIMER);
+        }
+        else
+            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
+    }
+    else
+        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
 }
trunk/src/recompiler/cpu-exec.c
(r19369 → r19660)
                 }
 #ifdef VBOX_HIGH_RES_TIMERS_HACK
-                /* NULL the current_tb here so cpu_interrupt() doesn't do
-                   anything unnecessary (like crashing during emulate single instruction). */
+                /* NULL the current_tb here so cpu_interrupt() doesn't do anything
+                   unnecessary (like crashing during emulate single instruction).
+                   Note! Don't use env1->pVM here, the code wouldn't run with
+                   gcc-4.4/amd64 anymore, see #3883. */
                 env->current_tb = NULL;
-                /* don't use env1->pVM here, the code wouldn't run with gcc-4.4/amd64
-                 * anymore, see #3883 */
-                TMTimerPoll(env->pVM);
+                if (    !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
+                                                    | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
+                    &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
+                         || !TMTimerPoll(env->pVM, env->pVCpu)) ) {
+                    ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
+                    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
+                    TMR3TimerQueuesDo(env->pVM);
+                    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
+                }
 #endif
             } /* for(;;) */
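The rewritten block replaces an unconditional TMTimerPoll with a guarded inline run of the timer queues: timers are only serviced when no exit/debug/single-stepping work is pending, and either the external-timer interrupt flag is already set or the poll reports an expired timer (a zero return). The condition, isolated as a standalone sketch (hypothetical flag values; the real ones are the CPU_INTERRUPT_* bits above):

    #include <stdbool.h>
    #include <stdint.h>

    static bool shouldRunTimerQueuesInline(uint32_t fIntReq, uint32_t fBlockers,
                                           uint32_t fTimerBit, uint64_t cTicksToNext)
    {
        return !(fIntReq & fBlockers)                  /* nothing more urgent    */
            && (   (fIntReq & fTimerBit)               /* timer FF already set   */
                || cTicksToNext == 0);                 /* or poll says expired   */
    }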