Timestamp: Apr 27, 2009 3:00:59 PM (16 years ago)
svn:sync-xref-src-repo-rev: 46584
Location: trunk
Files: 17 edited
trunk/include/VBox/types.h
r18101 → r19217

 /** Pointer to a ring-3 (user mode) VM structure. */
 typedef R3PTRTYPE(struct UVM *) PUVM;
+
+/** Pointer to a ring-3 (user mode) VMCPU structure. */
+typedef R3PTRTYPE(struct UVMCPU *) PUVMCPU;
 
 /** Virtual CPU ID. */
trunk/include/VBox/uvm.h
r19101 → r19217

     /** Pointer to the UVM structure. */
     PUVM                            pUVM;
+    /** Pointer to the VM structure. */
+    PVM                             pVM;
+    /** Pointer to the VMCPU structure. */
+    PVMCPU                          pVCpu;
     /** The virtual CPU ID. */
     RTCPUID                         idCpu;
trunk/include/VBox/vm.h
r19178 → r19217

     VMCPUSTATE volatile     enmState;
 
+    /** Pointer to the ring-3 UVMCPU structure. */
+    PUVMCPU                 pUVCpu;
     /** Ring-3 Host Context VM Pointer. */
     PVMR3                   pVMR3;
…
      * following it (to grow into and align the struct size).
      * */
-    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 9 : 6];
+    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 8 : 4];
 
     /** CPUM part. */
…
 #define VMCPU_FF_NORMAL_PRIORITY_POST_MASK  (VMCPU_FF_CSAM_SCAN_PAGE)
 
-/** Normal priority actions. */
+/** Normal priority VM actions. */
 #define VM_FF_NORMAL_PRIORITY_MASK          (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)
+/** Normal priority VMCPU actions. */
+#define VMCPU_FF_NORMAL_PRIORITY_MASK       (VMCPU_FF_REQUEST)
 
 /** Flags to clear before resuming guest execution. */
…
  * @param   fFlag   The flag to set.
  */
-#if 1 //def VBOX_WITH_SMP_GUESTS
-# define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
-#else
-# define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, (fFlag))
-#endif
+#define VMCPU_FF_SET(pVCpu, fFlag)     ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))
 
 /** @def VM_FF_CLEAR
…
  * @param   fFlag   The flag to clear.
  */
-#if 1 //def VBOX_WITH_SMP_GUESTS
-# define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
-#else
-# define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions, ~(fFlag))
-#endif
+#define VMCPU_FF_CLEAR(pVCpu, fFlag)   ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))
 
 /** @def VM_FF_ISSET
…
  * @param   fFlag   The flag to check.
  */
-#if 1 //def VBOX_WITH_SMP_GUESTS
-# define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
-#else
-# define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))
-#endif
+#define VMCPU_FF_ISSET(pVCpu, fFlag)   (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))
 
 /** @def VM_FF_ISPENDING
…
  * @param   fFlags  The flags to check for.
  */
-#if 1 //def VBOX_WITH_SMP_GUESTS
-# define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))
-#else
-# define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags))
-#endif
+#define VMCPU_FF_ISPENDING(pVCpu, fFlags)  ((pVCpu)->fLocalForcedActions & (fFlags))
 
 /** @def VM_FF_ISPENDING
…
  * @param   fExcpt  The flags that should not be set.
  */
-#if 1 //def VBOX_WITH_SMP_GUESTS
-# define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
-#else
-# define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fFlags)) && !((pVCpu)->CTX_SUFF(pVM)->fGlobalForcedActions & (fExcpt)) )
-#endif
+#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
 
 /** @def VM_IS_EMT
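Illustration (not part of the changeset): with the VBOX_WITH_SMP_GUESTS #if removed, the VMCPU_FF_* macros above operate purely on the VCPU's own fLocalForcedActions word. A minimal C11 sketch of that pattern follows; SIMVMCPU and the SIM_FF_* values are made-up stand-ins, and the C11 atomics stand in for the IPRT ASMAtomicOrU32/ASMAtomicAndU32 calls.

/* Per-CPU forced-action flags, set and cleared atomically. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SIM_FF_REQUEST 0x00000001u
#define SIM_FF_TIMER   0x00000002u

typedef struct SIMVMCPU {
    _Atomic uint32_t fLocalForcedActions;   /* per-CPU, no pVM indirection */
} SIMVMCPU;

/* VMCPU_FF_SET equivalent: atomic OR. */
static void simFFSet(SIMVMCPU *pVCpu, uint32_t fFlag)
{
    atomic_fetch_or(&pVCpu->fLocalForcedActions, fFlag);
}

/* VMCPU_FF_CLEAR equivalent: atomic AND with the complement. */
static void simFFClear(SIMVMCPU *pVCpu, uint32_t fFlag)
{
    atomic_fetch_and(&pVCpu->fLocalForcedActions, ~fFlag);
}

/* VMCPU_FF_ISPENDING equivalent: non-zero if any flag in fFlags is set. */
static uint32_t simFFIsPending(SIMVMCPU *pVCpu, uint32_t fFlags)
{
    return atomic_load(&pVCpu->fLocalForcedActions) & fFlags;
}

int main(void)
{
    SIMVMCPU VCpu = { 0 };
    simFFSet(&VCpu, SIM_FF_REQUEST);
    if (simFFIsPending(&VCpu, SIM_FF_REQUEST | SIM_FF_TIMER))
        printf("FF pending on this VCPU\n");
    simFFClear(&VCpu, SIM_FF_REQUEST);
    return 0;
}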
trunk/include/VBox/vm.mac
r19141 → r19217

     .fLocalForcedActions    resd 1
     .enmState               resd 1
+    .pUVCpu                 RTR3PTR_RES 1
     .pVMR3                  RTR3PTR_RES 1
     .pVMR0                  RTR0PTR_RES 1
trunk/include/VBox/vmapi.h
r19173 → r19217

 VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, unsigned cMillies);
 VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMREQDEST enmDest);
-VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM);
-VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM);
+VMMR3DECL(void) VMR3NotifyGlobalFF(PVM pVM, bool fNotifiedREM);
+VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, bool fNotifiedREM);
+VMMR3DECL(void) VMR3NotifyCpuFF(PVMCPU pVCpu, bool fNotifiedREM);
+VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVMCpu, bool fNotifiedREM);
 VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts);
-VMMR3DECL(int) VMR3WaitU(PUVM pUVM);
+VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVMCpu);
 VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM);
 VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM);
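Illustration (not part of the changeset): the API split above separates VM-wide wakeups from per-CPU wakeups. Below is a minimal POSIX sketch of the two call shapes, with condition variables standing in for the halt-method semaphores; all names are stand-ins, not the real VirtualBox API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SIM_CPUS 2

typedef struct SIMUVMCPU {
    pthread_mutex_t Mtx;
    pthread_cond_t  Cond;
    bool            fWait;      /* true while this EMT is halted */
} SIMUVMCPU;

static SIMUVMCPU g_aCpus[SIM_CPUS] = {
    { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false },
    { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false },
};

/* VMR3NotifyCpuFF equivalent: wake one specific EMT if it is waiting. */
static void simNotifyCpuFF(SIMUVMCPU *pUVCpu)
{
    pthread_mutex_lock(&pUVCpu->Mtx);
    if (pUVCpu->fWait)
        pthread_cond_signal(&pUVCpu->Cond);
    pthread_mutex_unlock(&pUVCpu->Mtx);
}

/* VMR3NotifyGlobalFF equivalent: iterate over all VCPUs, the same loop
 * shape the new implementation in VMEmt.cpp uses. */
static void simNotifyGlobalFF(void)
{
    for (unsigned iCpu = 0; iCpu < SIM_CPUS; iCpu++)
        simNotifyCpuFF(&g_aCpus[iCpu]);
}

int main(void)
{
    simNotifyGlobalFF();            /* VM-wide FF: wake everyone */
    simNotifyCpuFF(&g_aCpus[0]);    /* per-CPU FF, e.g. an interrupt */
    puts("notified");
    return 0;
}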
trunk/src/VBox/VMM/DBGF.cpp
r19141 → r19217

     rc = (DBGFCMD)ASMAtomicXchgU32((uint32_t volatile *)(void *)&pVM->dbgf.s.enmVMMCmd, enmCmd);
     VM_FF_SET(pVM, VM_FF_DBGF);
-    VMR3NotifyFF(pVM, false /* didn't notify REM */);
+    VMR3NotifyGlobalFF(pVM, false /* didn't notify REM */);
 }
 return rc;
trunk/src/VBox/VMM/EM.cpp
r19151 → r19217

 
     /*
+     * Normal priority then. (per-VCPU)
+     * (Executed in no particular order.)
+     */
+    if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
+        &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
+    {
+        /*
+         * Requests from other threads.
+         */
+        if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
+        {
+            rc2 = VMR3ReqProcessU(pVM->pUVM, (VMREQDEST)pVCpu->idCpu);
+            if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
+            {
+                Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
+                STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
+                return rc2;
+            }
+            UPDATE_RC();
+        }
+
+        /* check that we got them all */
+        Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
+    }
+
+    /*
      * High priority pre execution chunk last.
      * (Executed in ascending priority order.)
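Illustration (not part of the changeset): the hunk above follows EM's usual forced-action pattern: test the per-VCPU priority mask, service each flag it names, then assert that the mask contains nothing that was not handled. A small self-contained sketch of that pattern, with stand-in names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SIMCPU_FF_REQUEST              0x01u
#define SIMCPU_FF_NORMAL_PRIORITY_MASK (SIMCPU_FF_REQUEST)

static int simProcessRequests(uint32_t *pfForcedActions)
{
    *pfForcedActions &= ~SIMCPU_FF_REQUEST;   /* serviced: clear the flag */
    return 0;                                 /* VINF_SUCCESS stand-in */
}

static int simNormalPriorityActions(uint32_t *pfForcedActions)
{
    if (*pfForcedActions & SIMCPU_FF_NORMAL_PRIORITY_MASK)
    {
        if (*pfForcedActions & SIMCPU_FF_REQUEST)
        {
            int rc = simProcessRequests(pfForcedActions);
            if (rc != 0)
                return rc;                    /* bail out, like VINF_EM_OFF */
        }
        /* "check that we got them all" */
        assert(!(SIMCPU_FF_NORMAL_PRIORITY_MASK & ~SIMCPU_FF_REQUEST));
    }
    return 0;
}

int main(void)
{
    uint32_t fFlags = SIMCPU_FF_REQUEST;
    int rc = simNormalPriorityActions(&fFlags);
    printf("rc=%d fFlags=%#x\n", rc, fFlags);
    return 0;
}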
trunk/src/VBox/VMM/PDMDevHlp.cpp
r19076 → r19217

     VM_FF_SET(pVM, VM_FF_PDM_DMA);
     REMR3NotifyDmaPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 }
trunk/src/VBox/VMM/PDMDevMiscHlp.cpp
r19141 → r19217

     VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC);
     REMR3NotifyInterruptSet(pVM, pVCpu);
-    VMR3NotifyFF(pVM, true); /** @todo SMP: notify the right cpu. */
+    VMR3NotifyCpuFF(pVCpu, true);
 }
 
…
     VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC);
     REMR3NotifyInterruptSet(pVM, pVCpu);
-    VMR3NotifyFF(pVM, true); /** @todo SMP: notify the right cpu. */
+    VMR3NotifyCpuFF(pVCpu, true);
 }
 
trunk/src/VBox/VMM/TM.cpp
r19032 → r19217

     VM_FF_SET(pVM, VM_FF_TIMER);
     REMR3NotifyTimerPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
     STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
 }
trunk/src/VBox/VMM/VM.cpp
r19141 → r19217

      */
     PVMREQ pReq;
-    /** @todo SMP: VMREQDEST_ANY -> VMREQDEST_CPU0 */
-    rc = VMR3ReqCallU(pUVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU,
+    rc = VMR3ReqCallU(pUVM, VMREQDEST_ANY /* can't use CPU0 here as it's too early (pVM==0) */, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU,
                       4, pUVM, cCPUs, pfnCFGMConstructor, pvUserCFGM);
     if (RT_SUCCESS(rc))
…
 
     AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
-    AssertRelease(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
 
     pUVM->vm.s.ppAtResetNext = &pUVM->vm.s.pAtReset;
…
     pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
     pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
+
     pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
…
     for (i = 0; i < cCPUs; i++)
     {
-        pUVM->aCpus[i].pUVM = pUVM;
-        pUVM->aCpus[i].idCpu = i;
+        pUVM->aCpus[i].pUVM   = pUVM;
+        pUVM->aCpus[i].idCpu  = i;
     }
…
     if (RT_SUCCESS(rc))
     {
-        rc = RTSemEventCreate(&pUVM->vm.s.EventSemWait);
+        /* Allocate a halt method event semaphore for each VCPU. */
+        for (i = 0; i < cCPUs; i++)
+        {
+            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
+            if (RT_FAILURE(rc))
+                break;
+        }
+
         if (RT_SUCCESS(rc))
         {
…
             STAMR3TermUVM(pUVM);
         }
-        RTSemEventDestroy(pUVM->vm.s.EventSemWait);
+        for (i = 0; i < cCPUs; i++)
+        {
+            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
+        }
     }
     RTTlsFree(pUVM->vm.s.idxTLS);
…
     for (uint32_t i = 0; i < pVM->cCPUs; i++)
     {
+        pVM->aCpus[i].pUVCpu        = &pUVM->aCpus[i];
         pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
         Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
+
+        pUVM->aCpus[i].pVM          = pVM;
+        pUVM->aCpus[i].pVCpu        = &pVM->aCpus[i];
     }
…
     STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
 
-    STAM_REL_REG(pVM, &pUVM->vm.s.StatHaltYield,  STAMTYPE_PROFILE, "/PROF/VM/Halt/Yield",  STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.");
-    STAM_REL_REG(pVM, &pUVM->vm.s.StatHaltBlock,  STAMTYPE_PROFILE, "/PROF/VM/Halt/Block",  STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.");
-    STAM_REL_REG(pVM, &pUVM->vm.s.StatHaltTimers, STAMTYPE_PROFILE, "/PROF/VM/Halt/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.");
+    for (unsigned iCpu=0;iCpu<pVM->cCPUs;iCpu++)
+    {
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltYield,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.",    "/PROF/VM/CPU%d/Halt/Yield", iCpu);
+        AssertRC(rc);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltBlock,  STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.",    "/PROF/VM/CPU%d/Halt/Block", iCpu);
+        AssertRC(rc);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[iCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", iCpu);
+        AssertRC(rc);
+    }
 
     STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
…
         if (pUVM->pVM)
             VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
-        VMR3NotifyFFU(pUVM, true /* fNotifiedREM */);
-        if (pUVM->aCpus[i].vm.s.EventSemWait != NIL_RTSEMEVENT) /** @todo remove test when we start initializing it! */
-            RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
-    }
-    RTSemEventSignal(pUVM->vm.s.EventSemWait);
+        VMR3NotifyGlobalFFU(pUVM, true /* fNotifiedREM */);
+        RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
+    }
 
     /* Wait for them. */
…
     /* Cleanup the semaphores. */
     for (VMCPUID i = 0; i < pUVM->cCpus; i++)
-        if (pUVM->aCpus[i].vm.s.EventSemWait != NIL_RTSEMEVENT) /** @todo remove test when we start initializing it! */
-        {
-            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
-            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
-        }
-    RTSemEventDestroy(pUVM->vm.s.EventSemWait);
-    pUVM->vm.s.EventSemWait = NIL_RTSEMEVENT;
+    {
+        RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
+        pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
+    }
…
 VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
 {
-    PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
-
-    AssertMsg(pUVMCPU, ("RTTlsGet %d failed!\n", pVM->pUVM->vm.s.idxTLS));
-    return pUVMCPU->idCpu;
+    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
+
+    AssertMsg(pUVCpu, ("RTTlsGet %d failed!\n", pVM->pUVM->vm.s.idxTLS));
+    return pUVCpu->idCpu;
 }
…
 VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
 {
-    PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
-
-    if (!pUVMCPU)
+    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
+
+    if (!pUVCpu)
         return NIL_RTNATIVETHREAD;
 
-    return pUVMCPU->vm.s.NativeThreadEMT;
+    return pUVCpu->vm.s.NativeThreadEMT;
 }
…
 VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
 {
-    PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
-
-    if (!pUVMCPU)
+    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
+
+    if (!pUVCpu)
         return NIL_RTNATIVETHREAD;
 
-    return pUVMCPU->vm.s.NativeThreadEMT;
+    return pUVCpu->vm.s.NativeThreadEMT;
 }
…
 VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
 {
-    PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
-
-    if (!pUVMCPU)
+    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
+
+    if (!pUVCpu)
         return NIL_RTTHREAD;
 
-    return pUVMCPU->vm.s.ThreadEMT;
+    return pUVCpu->vm.s.ThreadEMT;
 }
…
 VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
 {
-    PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
-
-    if (!pUVMCPU)
+    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
+
+    if (!pUVCpu)
         return NIL_RTTHREAD;
 
-    return pUVMCPU->vm.s.ThreadEMT;
-}
-
+    return pUVCpu->vm.s.ThreadEMT;
+}
+
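Illustration (not part of the changeset): VM.cpp now creates one wait semaphore per VCPU and tears them all down again. A minimal sketch of that create/backout/destroy lifecycle, with POSIX unnamed semaphores standing in for RTSEMEVENT; this is not the IPRT API.

#include <semaphore.h>
#include <stdio.h>

#define SIM_CPUS 4

int main(void)
{
    sem_t    aEventSemWait[SIM_CPUS];
    unsigned i, cCreated = 0;

    /* Mirrors the creation loop in vmR3CreateUVM: stop on first failure. */
    for (i = 0; i < SIM_CPUS; i++)
    {
        if (sem_init(&aEventSemWait[i], 0 /* not shared */, 0) != 0)
            break;
        cCreated++;
    }

    if (cCreated == SIM_CPUS)
        puts("per-CPU halt semaphores created");

    /* Mirrors the destruction loop in vmR3DestroyUVM: only destroy the
     * semaphores that were actually created. */
    for (i = 0; i < cCreated; i++)
        sem_destroy(&aEventSemWait[i]);
    return cCreated == SIM_CPUS ? 0 : 1;
}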
trunk/src/VBox/VMM/VMEmt.cpp
r19141 → r19217

 DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
 {
-    PUVMCPU pUVMCPU = (PUVMCPU)pvArgs;
-    PUVM    pUVM    = pUVMCPU->pUVM;
-    RTCPUID idCpu   = pUVMCPU->idCpu;
+    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
+    PUVM    pUVM   = pUVCpu->pUVM;
+    RTCPUID idCpu  = pUVCpu->idCpu;
     int     rc;
…
                           ("Invalid arguments to the emulation thread!\n"));
 
-    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVMCPU);
+    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
     AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
…
     {
         /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
-        if (setjmp(pUVMCPU->vm.s.emtJumpEnv) != 0)
+        if (setjmp(pUVCpu->vm.s.emtJumpEnv) != 0)
         {
             rc = VINF_SUCCESS;
…
                 break;
             }
 
             if (pUVM->vm.s.pReqs)
             {
…
             }
             else
+            if (pUVCpu->vm.s.pReqs)
+            {
+                /*
+                 * Service execute in EMT request.
+                 */
+                rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVCpu->idCpu);
+                Log(("vmR3EmulationThread: Req (cpu=%d) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
+            }
+            else
             {
                 /*
                  * Nothing important is pending, so wait for something.
                  */
-                rc = VMR3WaitU(pUVM);
+                rc = VMR3WaitU(pUVCpu);
                 if (RT_FAILURE(rc))
                     break;
…
                 Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
             }
+            else if (pUVCpu->vm.s.pReqs)
+            {
+                /*
+                 * Service execute in EMT request.
+                 */
+                rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVCpu->idCpu);
+                Log(("vmR3EmulationThread: Req (cpu=%d) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
+            }
             else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
…
                 /*
                  * Nothing important is pending, so wait for something.
                  */
-                rc = VMR3WaitU(pUVM);
+                rc = VMR3WaitU(pUVCpu);
                 if (RT_FAILURE(rc))
                     break;
…
         vmR3DestroyFinalBitFromEMT(pUVM);
 
-        pUVMCPU->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
+        pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
     }
     Log(("vmR3EmulationThread: EMT is terminated.\n"));
…
      * The request loop.
      */
-    PUVMCPU pUVMCPU;
+    PUVMCPU pUVCpu;
     PUVM    pUVM = pVM->pUVM;
     VMSTATE enmBefore;
     int     rc;
 
-    pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
-    AssertReturn(pUVMCPU, VERR_INTERNAL_ERROR);
+    pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
+    AssertReturn(pUVCpu, VERR_INTERNAL_ERROR);
 
     for (;;)
…
             rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
             Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
+        }
+        else if (pUVCpu->vm.s.pReqs)
+        {
+            /*
+             * Service execute in EMT request.
+             */
+            rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVCpu->idCpu);
+            Log(("vmR3EmulationThread: Req (cpu=%d) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
         }
         else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
…
             /*
              * Nothing important is pending, so wait for something.
              */
-            rc = VMR3WaitU(pUVM);
+            rc = VMR3WaitU(pUVCpu);
             if (RT_FAILURE(rc))
                 break;
…
 
     /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
-    longjmp(pUVMCPU->vm.s.emtJumpEnv, 1);
+    longjmp(pUVCpu->vm.s.emtJumpEnv, 1);
 }
 
…
 /**
  * The old halt loop.
- *
- * @param   pUVM            Pointer to the user mode VM structure.
- */
-static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
+ */
+static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
 {
     /*
      * Halt loop.
      */
-    PVM pVM = pUVM->pVM;
+    PVM    pVM   = pUVCpu->pVM;
+    PVMCPU pVCpu = pUVCpu->pVCpu;
+
     int rc = VINF_SUCCESS;
-    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
+    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
     //unsigned cLoops = 0;
     for (;;)
…
          * addition to perhaps set an FF.
          */
-        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
+        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
         TMR3TimerQueuesDo(pVM);
-        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
+        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
…
         {
             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
-            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
+            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, a);
             RTThreadYield(); /* this is the best we can do here */
-            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
+            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, a);
         }
         else if (u64NanoTS < 2000000)
         {
             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
-            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
-            rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
-            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
+            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
+            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
+            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
         }
         else
         {
             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
-            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
-            rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
-            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
+            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
+            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
+            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
         }
         //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
…
             AssertRC(rc != VERR_INTERRUPTED);
             AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
-            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
             VM_FF_SET(pVM, VM_FF_TERMINATE);
             rc = VERR_INTERNAL_ERROR;
…
     }
 
-    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
+    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
     return rc;
 }
…
  * the lag has been eliminated.
  */
-static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now)
-{
-    PVM pVM = pUVM->pVM;
+static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
+{
+    PUVM    pUVM  = pUVCpu->pUVM;
+    PVMCPU  pVCpu = pUVCpu->pVCpu;
+    PVM     pVM   = pUVCpu->pVM;
 
     /*
…
     if (u32CatchUpPct /* non-zero if catching up */)
     {
-        if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
+        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
         {
             fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
             if (fSpinning)
             {
                 uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
-                fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
+                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                            > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                     RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
…
             else
             {
-                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
-                pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
+                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
+                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
             }
…
             fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
             if (fSpinning)
-                pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
+                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
         }
     }
-    else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
-    {
-        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
-        pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
+    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
+    {
+        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
+        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
     }
…
     int rc = VINF_SUCCESS;
-    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
+    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
     unsigned cLoops = 0;
     for (;; cLoops++)
…
          * Work the timers and check if we can exit.
          */
-        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
+        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
         TMR3TimerQueuesDo(pVM);
-        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
+        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
…
         {
-            const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
+            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
             VMMR3YieldStop(pVM);
 
             uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
-            if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
+            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                 cMilliSecs = 1;
             else
-                cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
+                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
             //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
-            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
-            rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
-            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
+            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
+            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
+            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
             if (rc == VERR_TIMEOUT)
                 rc = VINF_SUCCESS;
…
                 AssertRC(rc != VERR_INTERRUPTED);
                 AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
-                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                 VM_FF_SET(pVM, VM_FF_TERMINATE);
                 rc = VERR_INTERNAL_ERROR;
…
              */
             const uint64_t Elapsed = RTTimeNanoTS() - Start;
-            pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
+            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
             if (Elapsed > u64NanoTS)
-                pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
-            pUVM->vm.s.Halt.Method12.cBlocks++;
-            if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
-            {
-                pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
-                if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
+                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
+            pUVCpu->vm.s.Halt.Method12.cBlocks++;
+            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
+            {
+                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
+                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                 {
-                    pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
-                    pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
+                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
+                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                 }
             }
…
     //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
 
-    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
+    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
     return rc;
 }
…
  * try take care of the global scheduling of EMT threads.
  */
-static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now)
-{
-    PVM pVM = pUVM->pVM;
+static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
+{
+    PUVM    pUVM  = pUVCpu->pUVM;
+    PVMCPU  pVCpu = pUVCpu->pVCpu;
+    PVM     pVM   = pUVCpu->pVM;
 
     /*
…
     int rc = VINF_SUCCESS;
-    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
+    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
     unsigned cLoops = 0;
     for (;; cLoops++)
…
          * Work the timers and check if we can exit.
          */
-        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
+        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
         TMR3TimerQueuesDo(pVM);
-        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
+        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
…
 
             //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
-            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
+            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, c);
             rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
-            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
+            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, c);
             if (rc == VERR_INTERRUPTED)
                 rc = VINF_SUCCESS;
…
             {
                 AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
-                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                 VM_FF_SET(pVM, VM_FF_TERMINATE);
                 rc = VERR_INTERNAL_ERROR;
…
         else if (!(cLoops & 0x1fff))
         {
-            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
+            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, d);
             rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
-            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
+            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, d);
         }
     }
     //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
 
-    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
+    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
     return rc;
 }
…
  *
  * @returns VBox status code.
- * @param   pUVM            Pointer to the user mode VM structure.
- */
-static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
-{
-    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
-
-    PVM pVM = pUVM->pVM;
+ * @param   pUVCpu          Pointer to the user mode VMCPU structure.
+ */
+static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
+{
+    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+
+    PVM    pVM   = pUVCpu->pUVM->pVM;
     PVMCPU pVCpu = VMMGetCpu(pVM);
 
…
         {
             AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
-            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
             VM_FF_SET(pVM, VM_FF_TERMINATE);
             rc = VERR_INTERNAL_ERROR;
…
     }
 
-    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
+    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
     return rc;
 }
…
 /**
  * The global 1 halt method - VMR3NotifyFF() worker.
  *
- * @param   pUVM            Pointer to the user mode VM structure.
+ * @param   pUVCpu          Pointer to the user mode VMCPU structure.
  * @param   fNotifiedREM    See VMR3NotifyFF().
  */
-static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
-{
-    if (pUVM->vm.s.fWait)
-    {
-        int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
+static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
+{
+    if (pUVCpu->vm.s.fWait)
+    {
+        int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
         AssertRC(rc);
     }
     else if (!fNotifiedREM)
-        REMR3NotifyFF(pUVM->pVM);
+        REMR3NotifyFF(pUVCpu->pVM);
 }
…
 /**
  * Bootstrap VMR3Wait() worker.
  *
  * @returns VBox status code.
- * @param   pUVM            Pointer to the user mode VM structure.
- */
-static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
-{
-    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
+ * @param   pUVMCPU         Pointer to the user mode VMCPU structure.
+ */
+static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
+{
+    PUVM pUVM = pUVCpu->pUVM;
+
+    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
 
     int rc = VINF_SUCCESS;
…
          * Check Relevant FFs.
          */
-        if (pUVM->vm.s.pReqs)
-            break;
-        if (    pUVM->pVM
-            &&  (   VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
-                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVM->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
+        if (pUVM->vm.s.pReqs)   /* global requests pending? */
+            break;
+        if (pUVCpu->vm.s.pReqs) /* local requests pending? */
+            break;
+
+        if (    pUVCpu->pVM
+            &&  (   VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
+                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
                 )
            )
             break;
-        if (pUVM->vm.s.fTerminateEMT)
+        if (pUVCpu->vm.s.fTerminateEMT)
             break;
…
          * anything needs our attention.
          */
-        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
+        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
         if (rc == VERR_TIMEOUT)
             rc = VINF_SUCCESS;
…
         {
             AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
-            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
-            if (pUVM->pVM)
-                VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
+            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
+            if (pUVCpu->pVM)
+                VM_FF_SET(pUVCpu->pVM, VM_FF_TERMINATE);
             rc = VERR_INTERNAL_ERROR;
             break;
…
     }
 
-    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
+    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
     return rc;
 }
…
 /**
  * Bootstrap VMR3NotifyFF() worker.
  *
- * @param   pUVM            Pointer to the user mode VM structure.
+ * @param   pUVCpu          Pointer to the user mode VMCPU structure.
  * @param   fNotifiedREM    See VMR3NotifyFF().
  */
-static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
-{
-    if (pUVM->vm.s.fWait)
-    {
-        int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
+static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
+{
+    if (pUVCpu->vm.s.fWait)
+    {
+        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
         AssertRC(rc);
     }
…
 /**
  * Default VMR3Wait() worker.
  *
  * @returns VBox status code.
- * @param   pUVM            Pointer to the user mode VM structure.
- */
-static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
-{
-    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
-
-    PVM pVM = pUVM->pVM;
-    PVMCPU pVCpu = VMMGetCpu(pVM);
+ * @param   pUVMCPU         Pointer to the user mode VMCPU structure.
+ */
+static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
+{
+    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
+
+    PVM    pVM   = pUVCpu->pVM;
+    PVMCPU pVCpu = pUVCpu->pVCpu;
     int rc = VINF_SUCCESS;
     for (;;)
…
          * anything needs our attention.
          */
-        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
+        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
         if (rc == VERR_TIMEOUT)
             rc = VINF_SUCCESS;
…
         {
             AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
-            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
+            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
             VM_FF_SET(pVM, VM_FF_TERMINATE);
             rc = VERR_INTERNAL_ERROR;
…
     }
 
-    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
+    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
     return rc;
 }
…
 /**
  * Default VMR3NotifyFF() worker.
  *
- * @param   pUVM            Pointer to the user mode VM structure.
+ * @param   pUVCpu          Pointer to the user mode VMCPU structure.
  * @param   fNotifiedREM    See VMR3NotifyFF().
  */
-static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
-{
-    if (pUVM->vm.s.fWait)
-    {
-        int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
+static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVMCPU pUVCpu, bool fNotifiedREM)
+{
+    if (pUVCpu->vm.s.fWait)
+    {
+        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
         AssertRC(rc);
     }
     else if (!fNotifiedREM)
-        REMR3NotifyFF(pUVM->pVM);
+        REMR3NotifyFF(pUVCpu->pVM);
 }
…
     DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
     /** The halt function. */
-    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVM pUVM, PVMCPU pVCpu, const uint32_t fMask, uint64_t u64Now));
+    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
     /** The wait function. */
-    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVM pUVM));
+    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
     /** The notifyFF function. */
-    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
+    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVMCPU pUVCpu, bool fNotifiedREM));
 } g_aHaltMethods[] =
 {
…
     { VMHALTMETHOD_OLD,     NULL,                NULL, vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyFF },
     { VMHALTMETHOD_1,       vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyFF },
-  //{ VMHALTMETHOD_2,       vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoHalt, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
     { VMHALTMETHOD_GLOBAL_1,vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
 };
…
  *
  * @param   pVM             VM handle.
+ * @param   pVCpu           VMCPU handle (NULL if all/global notification)
  * @param   fNotifiedREM    Set if REM have already been notified. If clear the
  *                          generic REMR3NotifyFF() method is called.
  */
-VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
-{
+VMMR3DECL(void) VMR3NotifyGlobalFF(PVM pVM, bool fNotifiedREM)
+{
+    PUVM pUVM = pVM->pUVM;
+
     LogFlow(("VMR3NotifyFF:\n"));
-    PUVM pUVM = pVM->pUVM;
-    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
+    /** @todo might want to have a 2nd look at this (SMP) */
+    for (unsigned iCpu=0;iCpu<pVM->cCPUs;iCpu++)
+    {
+        PUVMCPU pUVCpu = pVM->aCpus[iCpu].pUVCpu;
+        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
+    }
+}
+
+/**
+ * Notify the emulation thread (EMT) about pending Forced Action (FF).
+ *
+ * This function is called by thread other than EMT to make
+ * sure EMT wakes up and promptly service an FF request.
+ *
+ * @param   pVM             VM handle.
+ * @param   pVCpu           VMCPU handle (NULL if all/global notification)
+ * @param   fNotifiedREM    Set if REM have already been notified. If clear the
+ *                          generic REMR3NotifyFF() method is called.
+ */
+VMMR3DECL(void) VMR3NotifyCpuFF(PVMCPU pVCpu, bool fNotifiedREM)
+{
+    PUVMCPU pUVCpu = pVCpu->pUVCpu;
+    PUVM    pUVM   = pUVCpu->pUVM;
+
+    LogFlow(("VMR3NotifyCpuFF:\n"));
+    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
 }
…
  * @param   pUVM            Pointer to the user mode VM structure.
  * @param   fNotifiedREM    Set if REM have already been notified. If clear the
  *                          generic REMR3NotifyFF() method is called.
  */
-VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
-{
-    LogFlow(("VMR3NotifyFF:\n"));
-    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
+VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, bool fNotifiedREM)
+{
+    LogFlow(("VMR3NotifyGlobalFFU:\n"));
+    /** @todo might want to have a 2nd look at this (SMP) */
+    for (unsigned iCpu=0;iCpu<pUVM->cCpus;iCpu++)
+    {
+        PUVMCPU pUVCpu = &pUVM->aCpus[iCpu];
+        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
+    }
+}
+
+/**
+ * Notify the emulation thread (EMT) about pending Forced Action (FF).
+ *
+ * This function is called by thread other than EMT to make
+ * sure EMT wakes up and promptly service an FF request.
+ *
+ * @param   pUVM            Pointer to the user mode VM structure.
+ * @param   fNotifiedREM    Set if REM have already been notified. If clear the
+ *                          generic REMR3NotifyFF() method is called.
+ */
+VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, bool fNotifiedREM)
+{
+    PUVM pUVM = pUVCpu->pUVM;
+
+    LogFlow(("VMR3NotifyCpuFFU:\n"));
+    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVCpu, fNotifiedREM);
 }
…
      * Record halt averages for the last second.
      */
-    PUVM pUVM = pVM->pUVM;
+    PUVMCPU pUVCpu = pVCpu->pUVCpu;
     uint64_t u64Now = RTTimeNanoTS();
-    int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
+    int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
     if (off > 1000000000)
     {
-        if (off > _4G || !pUVM->vm.s.cHalts)
-        {
-            pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
-            pUVM->vm.s.HaltFrequency = 1;
+        if (off > _4G || !pUVCpu->vm.s.cHalts)
+        {
+            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
+            pUVCpu->vm.s.HaltFrequency = 1;
         }
         else
         {
-            pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
-            pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
-        }
-        pUVM->vm.s.u64HaltsStartTS = u64Now;
-        pUVM->vm.s.cHalts = 0;
-    }
-    pUVM->vm.s.cHalts++;
+            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
+            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
+        }
+        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
+        pUVCpu->vm.s.cHalts = 0;
+    }
+    pUVCpu->vm.s.cHalts++;
 
     /*
      * Do the halt.
      */
-    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, pVCpu, fMask, u64Now);
+    PUVM pUVM = pUVCpu->pUVM;
+    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
 
     /*
…
  * @returns VINF_SUCCESS unless a fatal error occured. In the latter
  *          case an appropriate status code is returned.
- * @param   pUVM            Pointer to the user mode VM structure.
+ * @param   pUVCpu          Pointer to the user mode VMCPU structure.
  * @thread  The emulation thread.
  */
-VMMR3DECL(int) VMR3WaitU(PUVM pUVM)
+VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
 {
     LogFlow(("VMR3WaitU:\n"));
…
      * Check Relevant FFs.
      */
-    PVM pVM = pUVM->pVM;
+    PVM    pVM   = pUVCpu->pVM;
+    PVMCPU pVCpu = pUVCpu->pVCpu;
 
     if (    pVM
         &&  (   VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
-             || VMCPU_FF_ISPENDING(VMMGetCpu(pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
+             || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
             )
        )
…
      * doesn't have to special case anything).
      */
-    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
+    PUVM pUVM = pUVCpu->pUVM;
+    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
     LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fGlobalForcedActions : 0));
     return rc;
trunk/src/VBox/VMM/VMInternal.h
r18645 → r19217

     PSUPDRVSESSION                  pSession;
 
+    /** Force EMT to terminate. */
+    bool volatile                   fTerminateEMT;
+    /** If set the EMT does the final VM cleanup when it exits.
+     * If clear the VMR3Destroy() caller does so. */
+    bool                            fEMTDoesTheCleanup;
+
+    /** List of registered reset callbacks. */
+    PVMATRESET                      pAtReset;
+    /** List of registered reset callbacks. */
+    PVMATRESET                     *ppAtResetNext;
+
+    /** List of registered state change callbacks. */
+    PVMATSTATE                      pAtState;
+    /** List of registered state change callbacks. */
+    PVMATSTATE                     *ppAtStateNext;
+
+    /** List of registered error callbacks. */
+    PVMATERROR                      pAtError;
+    /** List of registered error callbacks. */
+    PVMATERROR                     *ppAtErrorNext;
+
+    /** List of registered error callbacks. */
+    PVMATRUNTIMEERROR               pAtRuntimeError;
+    /** List of registered error callbacks. */
+    PVMATRUNTIMEERROR              *ppAtRuntimeErrorNext;
+
+    /** @name Generic Halt data
+     * @{
+     */
+    /** The current halt method.
+     * Can be selected by CFGM option 'VM/HaltMethod'. */
+    VMHALTMETHOD                    enmHaltMethod;
+    /** The index into g_aHaltMethods of the current halt method. */
+    uint32_t volatile               iHaltMethod;
+    /** @} */
+
+    union
+    {
+        /**
+         * Method 1 & 2 - Block whenever possible, and when lagging behind
+         * switch to spinning with regular blocking every 5-200ms (defaults)
+         * depending on the accumulated lag. The blocking interval is adjusted
+         * with the average oversleeping of the last 64 times.
+         *
+         * The difference between 1 and 2 is that we use native absolute
+         * time APIs for the blocking instead of the millisecond based IPRT
+         * interface.
+         */
+        struct
+        {
+            /** The max interval without blocking (when spinning). */
+            uint32_t                u32MinBlockIntervalCfg;
+            /** The minimum interval between blocking (when spinning). */
+            uint32_t                u32MaxBlockIntervalCfg;
+            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
+            uint32_t                u32LagBlockIntervalDivisorCfg;
+            /** When to start spinning (lag / nano secs). */
+            uint32_t                u32StartSpinningCfg;
+            /** When to stop spinning (lag / nano secs). */
+            uint32_t                u32StopSpinningCfg;
+        }                           Method12;
+    }                               Halt;
+
+    /** Pointer to the DBGC instance data. */
+    void                           *pvDBGC;
+
+    /** TLS index for the VMINTUSERPERVMCPU pointer. */
+    RTTLS                           idxTLS;
+} VMINTUSERPERVM;
+
+/** Pointer to the VM internal data kept in the UVM. */
+typedef VMINTUSERPERVM *PVMINTUSERPERVM;
+
+
+/**
+ * VMCPU internal data kept in the UVM.
+ *
+ * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
+ */
+typedef struct VMINTUSERPERVMCPU
+{
+    /** Head of the request queue. Atomic. */
+    volatile PVMREQ                 pReqs;
+
+    /** The handle to the EMT thread. */
+    RTTHREAD                        ThreadEMT;
+    /** The native of the EMT thread. */
+    RTNATIVETHREAD                  NativeThreadEMT;
     /** Wait event semaphore. */
     RTSEMEVENT                      EventSemWait;
…
      * @{
      */
-    /** The current halt method.
-     * Can be selected by CFGM option 'VM/HaltMethod'. */
-    VMHALTMETHOD                    enmHaltMethod;
-    /** The index into g_aHaltMethods of the current halt method. */
-    uint32_t volatile               iHaltMethod;
     /** The average time (ns) between two halts in the last second. (updated once per second) */
     uint32_t                        HaltInterval;
…
              * This is 0 when we're not spinning. */
             uint64_t                u64StartSpinTS;
-
-            /** The max interval without blocking (when spinning). */
-            uint32_t                u32MinBlockIntervalCfg;
-            /** The minimum interval between blocking (when spinning). */
-            uint32_t                u32MaxBlockIntervalCfg;
-            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
-            uint32_t                u32LagBlockIntervalDivisorCfg;
-            /** When to start spinning (lag / nano secs). */
-            uint32_t                u32StartSpinningCfg;
-            /** When to stop spinning (lag / nano secs). */
-            uint32_t                u32StopSpinningCfg;
         }                           Method12;
 
…
     /** @} */
 
-
-    /** List of registered reset callbacks. */
-    PVMATRESET                      pAtReset;
-    /** List of registered reset callbacks. */
-    PVMATRESET                     *ppAtResetNext;
-
-    /** List of registered state change callbacks. */
-    PVMATSTATE                      pAtState;
-    /** List of registered state change callbacks. */
-    PVMATSTATE                     *ppAtStateNext;
-
-    /** List of registered error callbacks. */
-    PVMATERROR                      pAtError;
-    /** List of registered error callbacks. */
-    PVMATERROR                     *ppAtErrorNext;
-
-    /** List of registered error callbacks. */
-    PVMATRUNTIMEERROR               pAtRuntimeError;
-    /** List of registered error callbacks. */
-    PVMATRUNTIMEERROR              *ppAtRuntimeErrorNext;
-
-    /** Pointer to the DBGC instance data. */
-    void                           *pvDBGC;
-
-    /** TLS index for the VMINTUSERPERVMCPU pointer. */
-    RTTLS                           idxTLS;
-} VMINTUSERPERVM;
-
-/** Pointer to the VM internal data kept in the UVM. */
-typedef VMINTUSERPERVM *PVMINTUSERPERVM;
-
-
-/**
- * VMCPU internal data kept in the UVM.
- *
- * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
- */
-typedef struct VMINTUSERPERVMCPU
-{
-    /** Head of the request queue. Atomic. */
-    volatile PVMREQ                 pReqs;
-
-    /** The handle to the EMT thread. */
-    RTTHREAD                        ThreadEMT;
-    /** The native of the EMT thread. */
-    RTNATIVETHREAD                  NativeThreadEMT;
-    /** Wait event semaphore. */
-    RTSEMEVENT                      EventSemWait;
-    /** Wait/Idle indicator. */
-    bool volatile                   fWait;
-    /** Force EMT to terminate. */
-    bool volatile                   fTerminateEMT;
-    /** If set the EMT does the final VM cleanup when it exits.
-     * If clear the VMR3Destroy() caller does so. */
-    bool                            fEMTDoesTheCleanup;
-
-    /** @name Generic Halt data
-     * @{
-     */
-    /** The current halt method.
-     * Can be selected by CFGM option 'VM/HaltMethod'. */
-    VMHALTMETHOD                    enmHaltMethod;
-    /** The index into g_aHaltMethods of the current halt method. */
-    uint32_t volatile               iHaltMethod;
-    /** The average time (ns) between two halts in the last second. (updated once per second) */
-    uint32_t                        HaltInterval;
-    /** The average halt frequency for the last second. (updated once per second) */
-    uint32_t                        HaltFrequency;
-    /** The number of halts in the current period. */
-    uint32_t                        cHalts;
-    uint32_t                        padding; /**< alignment padding. */
-    /** When we started counting halts in cHalts (RTTimeNanoTS). */
-    uint64_t                        u64HaltsStartTS;
-    /** @} */
-
-    /** Union containing data and config for the different halt algorithms. */
-    union
-    {
-        /**
-         * Method 1 & 2 - Block whenever possible, and when lagging behind
-         * switch to spinning with regular blocking every 5-200ms (defaults)
-         * depending on the accumulated lag. The blocking interval is adjusted
-         * with the average oversleeping of the last 64 times.
-         *
-         * The difference between 1 and 2 is that we use native absolute
-         * time APIs for the blocking instead of the millisecond based IPRT
-         * interface.
-         */
-        struct
-        {
-            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
-            uint32_t                cBlocks;
-            /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
-            uint64_t                cNSBlockedTooLongAvg;
-            /** Total time spend oversleeping when blocking. */
-            uint64_t                cNSBlockedTooLong;
-            /** Total time spent blocking. */
-            uint64_t                cNSBlocked;
-            /** The timestamp (RTTimeNanoTS) of the last block. */
-            uint64_t                u64LastBlockTS;
-
-            /** When we started spinning relentlessly in order to catch up some of the oversleeping.
-             * This is 0 when we're not spinning. */
-            uint64_t                u64StartSpinTS;
-
-            /** The max interval without blocking (when spinning). */
-            uint32_t                u32MinBlockIntervalCfg;
-            /** The minimum interval between blocking (when spinning). */
-            uint32_t                u32MaxBlockIntervalCfg;
-            /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
-            uint32_t                u32LagBlockIntervalDivisorCfg;
-            /** When to start spinning (lag / nano secs). */
-            uint32_t                u32StartSpinningCfg;
-            /** When to stop spinning (lag / nano secs). */
-            uint32_t                u32StopSpinningCfg;
-        }                           Method12;
-
-#if 0
-        /**
-         * Method 3 & 4 - Same as method 1 & 2 respectivly, except that we
-         * sprinkle it with yields.
-         */
-        struct
-        {
-            /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
-            uint32_t                cBlocks;
-            /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
-            uint64_t                cBlockedTooLongNSAvg;
-            /** Total time spend oversleeping when blocking. */
-            uint64_t                cBlockedTooLongNS;
-            /** Total time spent blocking. */
-            uint64_t                cBlockedNS;
-            /** The timestamp (RTTimeNanoTS) of the last block. */
-            uint64_t                u64LastBlockTS;
-
-            /** How many times we've yielded while cBlockedNS and cBlockedTooLongNS has been accumulating. */
-            uint32_t                cYields;
-            /** Avg. time spend oversleeping when yielding. */
-            uint32_t                cYieldTooLongNSAvg;
-            /** Total time spend oversleeping when yielding. */
-            uint64_t                cYieldTooLongNS;
-            /** Total time spent yielding. */
-            uint64_t                cYieldedNS;
-            /** The timestamp (RTTimeNanoTS) of the last block. */
-            uint64_t                u64LastYieldTS;
-
-            /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
-            uint64_t                u64StartSpinTS;
-        }                           Method34;
-#endif
-    }                               Halt;
-
-    /** Profiling the halted state; yielding vs blocking.
-     * @{ */
-    STAMPROFILE                     StatHaltYield;
-    STAMPROFILE                     StatHaltBlock;
-    STAMPROFILE                     StatHaltTimers;
-    STAMPROFILE                     StatHaltPoll;
-    /** @} */
-
-    /** Pointer to the DBGC instance data. */
-    void                           *pvDBGC;
-
     /** vmR3EmulationThread longjmp buffer. Must be last in the structure. */
     jmp_buf                         emtJumpEnv;
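Illustration (not part of the changeset): the restructuring above splits the halt data by ownership: the *Cfg tunables stay in the per-VM structure (read by all CPUs), while the running statistics move to the per-CPU structure (written only by that CPU's EMT). A tiny stand-in sketch of the split:

#include <stdint.h>
#include <stdio.h>

typedef struct SIMINTUSERPERVM {
    /* Shared halt configuration, identical for every CPU. */
    uint32_t u32MinBlockIntervalCfg;
    uint32_t u32StopSpinningCfg;
} SIMINTUSERPERVM;

typedef struct SIMINTUSERPERVMCPU {
    /* Per-CPU halt bookkeeping, touched only by that CPU's EMT. */
    uint32_t cBlocks;
    uint64_t cNSBlocked;
    uint64_t cNSBlockedTooLong;
} SIMINTUSERPERVMCPU;

int main(void)
{
    SIMINTUSERPERVM    VmData  = { 5, 2000000 };
    SIMINTUSERPERVMCPU CpuData = { 0, 0, 0 };
    CpuData.cBlocks++;                        /* per-CPU update, no sharing */
    printf("cfg=%u blocks=%u\n", (unsigned)VmData.u32MinBlockIntervalCfg,
           (unsigned)CpuData.cBlocks);
    return 0;
}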
trunk/src/VBox/VMM/VMMAll/PDMAllQueue.cpp
r19141 → r19217

 #ifdef IN_RING3
     REMR3NotifyQueuePending(pVM); /** @todo r=bird: we can remove REMR3NotifyQueuePending and let VMR3NotifyFF do the work. */
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 #endif
 }
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r19032 → r19217

 #ifdef IN_RING3
     REMR3NotifyTimerPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 #endif
 }
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r19141 → r19217

 #ifdef IN_RING3
     REMR3NotifyTimerPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 #endif
 }
…
 #ifdef IN_RING3
     REMR3NotifyTimerPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 #endif
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
…
 #ifdef IN_RING3
     REMR3NotifyTimerPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 #endif
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
…
 #ifdef IN_RING3
     REMR3NotifyTimerPending(pVM);
-    VMR3NotifyFF(pVM, true);
+    VMR3NotifyGlobalFF(pVM, true);
 #endif
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
trunk/src/VBox/VMM/VMReq.cpp
r19179 → r19217

                     VERR_VM_REQUEST_INVALID_TYPE);
     AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
-    AssertMsgReturn(enmDest == VMREQDEST_ANY || enmDest == VMREQDEST_BROADCAST || (unsigned)enmDest < pUVM->pVM->cCPUs, ("Invalid destination %d (max=%d)\n", enmDest, pUVM->pVM->cCPUs), VERR_INVALID_PARAMETER);
+    AssertMsgReturn(enmDest == VMREQDEST_ANY || enmDest == VMREQDEST_BROADCAST || (unsigned)enmDest < pUVM->cCpus, ("Invalid destination %d (max=%d)\n", enmDest, pUVM->cCpus), VERR_INVALID_PARAMETER);
 
     /*
…
                     VERR_VM_REQUEST_INVALID_TYPE);
 
-    /** @todo SMP: Temporary hack until the unicast and broadcast cases has been
-     * implemented correctly below. It asserts + hangs now. */
-    if (pReq->enmDest != VMREQDEST_ANY)
-        pReq->enmDest = VMREQDEST_ANY;
-
-
     /*
      * Are we the EMT or not?
…
         unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
 
-        for (unsigned i=0;i<pUVM->pVM->cCPUs;i++)
+        for (unsigned i=0;i<pUVM->cCpus;i++)
         {
             PVMCPU pVCpu = &pUVM->pVM->aCpus[i];
…
             if (pUVM->pVM)
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
-            /* @todo: VMR3NotifyFFU*/
-            AssertFailed();
-            VMR3NotifyFFU(pUVM, false);
+            VMR3NotifyCpuFFU(pUVCpu, false);
 
             /*
…
         unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
 
+        /* Fetch the right UVMCPU */
+        pUVCpu = &pUVM->aCpus[idTarget];
+
         /*
          * Insert it.
…
         if (pUVM->pVM)
             VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
-        /* @todo: VMR3NotifyFFU*/
-        AssertFailed();
-        VMR3NotifyFFU(pUVM, false);
+        VMR3NotifyCpuFFU(pUVCpu, false);
 
         /*
…
         if (pUVM->pVM)
             VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
-        VMR3NotifyFFU(pUVM, false);
+        VMR3NotifyGlobalFFU(pUVM, false);
 
         /*
…
      * Process loop.
      *
-     * We do not repeat the outer loop if we've got an informationtional status code
+     * We do not repeat the outer loop if we've got an informational status code
      * since that code needs processing by our caller.
…
             ppReqs = (void * volatile *)&pUVM->aCpus[enmDest].vm.s.pReqs;
             if (RT_LIKELY(pUVM->pVM))
-            {
-                PVMCPU pVCpu = &pUVM->pVM->aCpus[enmDest];
-
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_REQUEST);
-            }
+                VMCPU_FF_CLEAR(&pUVM->pVM->aCpus[enmDest], VMCPU_FF_REQUEST);
         }
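Illustration (not part of the changeset): the unicast path above pushes a request onto the per-CPU queue head, which VMInternal.h documents as "Head of the request queue. Atomic." A minimal C11 sketch of such a lock-free push; the CAS loop stands in for the IPRT ASMAtomic* primitives and SIMVMREQ is a stand-in type.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct SIMVMREQ {
    struct SIMVMREQ *pNext;
    int              iRequest;
} SIMVMREQ;

/* Lock-free LIFO push: retry until the head swap succeeds. */
static void simReqQueue(SIMVMREQ *_Atomic *ppReqs, SIMVMREQ *pReq)
{
    SIMVMREQ *pHead = atomic_load(ppReqs);
    do
        pReq->pNext = pHead;     /* link to the current head */
    while (!atomic_compare_exchange_weak(ppReqs, &pHead, pReq));
}

int main(void)
{
    SIMVMREQ *_Atomic pReqs = NULL;             /* per-CPU queue head */
    SIMVMREQ Req = { NULL, 42 };
    simReqQueue(&pReqs, &Req);
    printf("queued request %d\n", atomic_load(&pReqs)->iRequest);
    return 0;
}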