Changeset 19660 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: May 13, 2009 2:09:15 PM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     2 edited
Legend (the viewer's colour coding is rendered here as diff prefixes):
  (no prefix)  Unmodified
  +            Added
  -            Removed
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r19538 → r19660

         tmUnlock(pVM);
     }
-    else if (!VM_FF_ISSET(pVM, VM_FF_TIMER)) /**@todo only do this when arming the timer. */
-    {
-        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
-        VM_FF_SET(pVM, VM_FF_TIMER);
+    else
+    {
+        /** @todo FIXME: don't use FF for scheduling! */
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) /**@todo only do this when arming the timer. */
+        {
+            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-        VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+#endif
+            STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
+        }
     }
 }
…
  * This function is called before FFs are checked in the inner execution EM loops.
  *
- * @returns Virtual timer ticks to the next event.
+ * @returns Virtual timer ticks to the next event. (I.e. 0 means that an timer
+ *          has expired or some important rescheduling is pending.)
  * @param   pVM         Pointer to the shared VM structure.
+ * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
  * @thread  The emulation thread.
  */
-VMMDECL(uint64_t) TMTimerPoll(PVM pVM)
-{
-    int rc = tmLock(pVM); /* play safe for now */
+VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
+{
+    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
+    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

     /*
-     * Return straight away if the timer FF is already set.
+     * Return straight away if the timer FF is already set ...
      */
-    if (VM_FF_ISSET(pVM, VM_FF_TIMER))
+    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
     {
         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
-#ifndef IN_RING3
-        if (RT_SUCCESS(rc))
-#endif
-            tmUnlock(pVM);
-        return 0;
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+    }
+
+    /*
+     * ... or if timers are being run.
+     */
+    if (pVM->tm.s.fRunningQueues)
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
+        return s_u64OtherRet;
     }

…
      * Get current time and check the expire times of the two relevant queues.
      */
-    const uint64_t u64Now = TMVirtualGet(pVM);
+    int rc = tmLock(pVM); /** @todo FIXME: Stop playing safe here... */
+    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);

     /*
      * TMCLOCK_VIRTUAL
      */
     const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
     const int64_t  i64Delta1  = u64Expire1 - u64Now;
     if (i64Delta1 <= 0)
     {
…
 #endif
         tmUnlock(pVM);
-        VM_FF_SET(pVM, VM_FF_TIMER);
+        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        return 0;
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     }

…
     if (i64Delta2 <= 0)
     {
+        if (   !pVM->tm.s.fRunningQueues
+            && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+        {
+            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        }
         STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
 #ifndef IN_RING3
…
         tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
-        VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        return 0;
+        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
     }
     if (pVM->tm.s.fVirtualSyncCatchUp)
…
  *          0 if the next event has already expired.
  * @param   pVM         Pointer to the shared VM structure.
- * @param   pVM         Pointer to the shared VM structure.
+ * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
  * @param   pu64Delta   Where to store the delta.
  * @thread  The emulation thread.
  */
-VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta)
-{
-    int rc = tmLock(pVM); /* play safe for now. */
+VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
+{
+    static const uint64_t s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
+    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
+    STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);

     /*
-     * Return straight away if the timer FF is already set.
+     * Return straight away if the timer FF is already set ...
      */
-    if (VM_FF_ISSET(pVM, VM_FF_TIMER))
-    {
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
-#ifndef IN_RING3
-        if (RT_SUCCESS(rc))
-#endif
-            tmUnlock(pVM);
-        *pu64Delta = 0;
-        return 0;
+    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
+        if (pVCpuDst == pVCpu)
+        {
+            *pu64Delta = 0;
+            return 0;
+        }
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     }

     /*
-     * Get current time and check the expire times of the two relevant queues.
+     * ... or if timers are being run.
      */
-    const uint64_t u64Now = TMVirtualGet(pVM);
+    if (pVM->tm.s.fRunningQueues)
+    {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
+    }
+
+    int rc = tmLock(pVM); /** @todo FIXME: Stop playin safe... */

     /*
-     * TMCLOCK_VIRTUAL
+     * Check for TMCLOCK_VIRTUAL expiration.
      */
     const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
…
     if (i64Delta1 <= 0)
     {
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
+        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+        REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
 #ifndef IN_RING3
         if (RT_SUCCESS(rc))
…
         tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
-        VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        *pu64Delta = 0;
-        return 0;
+        if (pVCpuDst == pVCpu)
+        {
+            *pu64Delta = 0;
+            return 0;
+        }
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     }

     /*
-     * TMCLOCK_VIRTUAL_SYNC
+     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
      * This isn't quite as stright forward if in a catch-up, not only do
      * we have to adjust the 'now' but when have to adjust the delta as well.
…
     if (i64Delta2 <= 0)
     {
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
+        {
+            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
+#ifdef IN_RING3
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+        }
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
+
 #ifndef IN_RING3
…
         tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
-        VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-#endif
-        *pu64Delta = 0;
-        return 0;
+        if (pVCpuDst == pVCpu)
+        {
+            *pu64Delta = 0;
+            return 0;
+        }
+        *pu64Delta = s_u64OtherRet;
+        return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
     }
     if (pVM->tm.s.fVirtualSyncCatchUp)
         i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);

-    /*
-     * Return the GIP time of the next event.
-     * This is the reverse of what tmVirtualGetRaw is doing.
-     */
-    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
-    uint64_t u64GipTime = RT_MIN(i64Delta1, i64Delta2);
-    *pu64Delta = u64GipTime;
-    u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
-    if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
-    {
-        u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
-        u64GipTime *= 100;
-        u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
-        u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
-    }
-
+    uint64_t u64GipTime;
+    if (pVCpuDst == pVCpu)
+    {
+        /*
+         * Return the GIP time of the next event.
+         * This is the reverse of what tmVirtualGetRaw is doing.
+         */
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
+        u64GipTime = RT_MIN(i64Delta1, i64Delta2);
+        *pu64Delta = u64GipTime;
+        u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
+        if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
+        {
+            u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
+            u64GipTime *= 100;
+            u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
+            u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
+        }
+    }
+    else
+    {
+        *pu64Delta = s_u64OtherRet;
+        u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
+    }
 #ifndef IN_RING3
     if (RT_SUCCESS(rc))
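Two pieces of arithmetic in the TMTimerPoll/TMTimerPollGIP code above are easy to misread: the catch-up rescaling of the virtual-sync delta, and the virtual-to-GIP conversion the comment calls "the reverse of what tmVirtualGetRaw is doing". The following standalone sketch illustrates both; the helper is a hypothetical stand-in for IPRT's ASMMultU64ByU32DivByU32 (implemented here with a GCC/Clang 128-bit extension), not the real routine.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for ASMMultU64ByU32DivByU32: u64 * u32A / u32B with a
 * wide intermediate so the multiply cannot overflow (GCC/Clang extension). */
static uint64_t MultU64ByU32DivByU32(uint64_t u64, uint32_t u32A, uint32_t u32B)
{
    return (uint64_t)((unsigned __int128)u64 * u32A / u32B);
}

int main(void)
{
    /* 1) Catch-up rescale: at +25% catch-up the virtual sync clock advances
     *    125 ns per 100 ns of virtual time, so a 1250 ns sync delta expires
     *    after only 1000 ns of virtual time: delta * 100 / (pct + 100). */
    uint32_t const uCatchUpPct = 25;
    int64_t        i64Delta2   = 1250;
    i64Delta2 = (int64_t)MultU64ByU32DivByU32((uint64_t)i64Delta2, 100, uCatchUpPct + 100);
    assert(i64Delta2 == 1000);

    /* 2) Warp-drive reversal: with the guest clock at uPct percent of GIP
     *    speed since uStart, virtual = start + (gip - start) * pct / 100;
     *    the poll code inverts this to turn a virtual-time deadline back
     *    into a GIP deadline: gip = start + (virtual - start) * 100 / pct. */
    uint64_t const uStart   = 1000000;  /* GIP time the warp drive engaged */
    uint32_t const uPct     = 50;       /* guest clock at half speed */
    uint64_t const uGip     = 3000000;
    uint64_t const uVirtual = uStart + (uGip - uStart) * uPct / 100;
    uint64_t const uBack    = uStart + (uVirtual - uStart) * 100 / uPct;
    assert(uVirtual == 2000000 && uBack == uGip);   /* round trip recovers uGip */
    return 0;
}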
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r19500 → r19660

      * Use the chance to check for expired timers.
      */
-    if (    fCheckTimers
-        &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
-        &&  !pVM->tm.s.fRunningQueues
-        &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
-             || (   pVM->tm.s.fVirtualSyncTicking
-                 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
-                )
-            )
-       )
-    {
-        VM_FF_SET(pVM, VM_FF_TIMER);
-        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
+    if (fCheckTimers)
+    {
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
+        if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+            &&  !pVM->tm.s.fRunningQueues
+            &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
+                 || (   pVM->tm.s.fVirtualSyncTicking
+                     && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
+                    )
+                )
+            &&  !pVM->tm.s.fRunningQueues
+           )
+        {
+            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
+            Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-        REMR3NotifyTimerPending(pVM);
-        VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
+            REMR3NotifyTimerPending(pVM, pVCpuDst);
+            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
+#endif
+        }
     }
 }
…

 /**
- * Gets the current TMCLOCK_VIRTUAL time
+ * Gets the current TMCLOCK_VIRTUAL time without checking
+ * timers or anything.
+ *
+ * Meaning, this has no side effect on FFs like TMVirtualGet may have.
  *
  * @returns The timestamp.
- * @param   pVM     VM handle.
- * @param   fCheckTimers    Check timers or not
- *
- * @remark  While the flow of time will never go backwards, the speed of the
- *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
- *          influenced by power saving (SpeedStep, PowerNow!), while the former
- *          makes use of TSC and kernel timers.
- */
-VMMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
-{
-    return tmVirtualGet(pVM, fCheckTimers);
+ * @param   pVM     VM handle.
+ *
+ * @remarks See TMVirtualGet.
+ */
+VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
+{
+    return tmVirtualGet(pVM, false /*fCheckTimers*/);
 }
…
  * @thread  EMT.
  */
-VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
-{
-    uint64_t u64;
+DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
+{
+    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
+    uint64_t u64;
+
     if (pVM->tm.s.fVirtualSyncTicking)
     {
-        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);
+        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];

         /*
…
         Assert(pVM->tm.s.cVirtualTicking);
         u64 = tmVirtualGetRaw(pVM);
-        if (    fCheckTimers
-            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
-            &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
+        if (fCheckTimers)
         {
-            VM_FF_SET(pVM, VM_FF_TIMER);
+            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
+                &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
+            {
+                Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
+                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-            REMR3NotifyTimerPending(pVM);
-            VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
-            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
+                REMR3NotifyTimerPending(pVM, pVCpuDst);
+                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
+#endif
+                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
+            }
         }
…
         {
             u64 = u64Expire;
-            int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
+            int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. FIXME */
             if (RT_SUCCESS(rc))
             {
                 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
                 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
+                VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
                 tmUnlock(pVM);
             }
             if (    fCheckTimers
-                &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
+                &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
             {
-                VM_FF_SET(pVM, VM_FF_TIMER);
+                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
 #ifdef IN_RING3
-                REMR3NotifyTimerPending(pVM);
-                VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
+                REMR3NotifyTimerPending(pVM, pVCpuDst);
+                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
 #endif
                 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
…
         u64 = pVM->tm.s.u64VirtualSync;

-        /*
-         * If it looks like a halt caused by pending timers, make sure the FF is raised.
-         * This is a safeguard against timer queue runner leaving the virtual sync clock stopped.
-         */
-        if (    fCheckTimers
-            &&  pVM->tm.s.cVirtualTicking
-            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
-        {
-            const uint64_t u64Expire = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
-            if (u64 >= u64Expire)
-            {
-                VM_FF_SET(pVM, VM_FF_TIMER);
-#ifdef IN_RING3
-                REMR3NotifyTimerPending(pVM);
-                VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
-#endif
-                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
-                Log4(("TM: %RU64/%RU64: exp tmr=>ff (!)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
-            }
-        }
-    }
+    }
+
     return u64;
 }
…
  * @param   pVM     VM handle.
  * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
  */
 VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
 {
-    return TMVirtualSyncGetEx(pVM, true /* check timers */);
+    return tmVirtualSyncGetEx(pVM, true /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
+ * TMCLOCK_VIRTUAL.
+ *
+ * @returns The timestamp.
+ * @param   pVM     VM handle.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
+{
+    return tmVirtualSyncGetEx(pVM, false /* check timers */);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL_SYNC time.
+ *
+ * @returns The timestamp.
+ * @param   pVM             VM handle.
+ * @param   fCheckTimers    Check timers on the virtual clock or not.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
+{
+    return tmVirtualSyncGetEx(pVM, fCheckTimers);
 }
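The common thread in both files is that the global VM_FF_TIMER force flag becomes a per-VCPU VMCPU_FF_TIMER raised only on the EMT that owns the timer queues (tm.s.idTimerCpu); the other EMTs just get the s_u64OtherRet sleep hint instead of a wakeup. A minimal model of that routing follows (hypothetical types and names, not VirtualBox code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MY_CPU_FF_TIMER  UINT32_C(0x00000001)   /* hypothetical per-CPU force-flag bit */

typedef struct MYCPU { uint32_t fLocalForcedActions; } MYCPU;
typedef struct MYVM  { uint32_t idTimerCpu; MYCPU aCpus[4]; } MYVM;

/* Raise the timer FF on the dedicated timer EMT only, mirroring the new
 * VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER) calls in the changeset. */
static void mySignalTimerFF(MYVM *pVM)
{
    MYCPU *pVCpuDst = &pVM->aCpus[pVM->idTimerCpu];
    pVCpuDst->fLocalForcedActions |= MY_CPU_FF_TIMER;
}

/* Each EMT polls its own flag; non-timer EMTs see nothing pending and can
 * keep executing guest code instead of taking a spurious wakeup. */
static bool myCpuHasTimerWork(MYVM const *pVM, uint32_t idCpu)
{
    return (pVM->aCpus[idCpu].fLocalForcedActions & MY_CPU_FF_TIMER) != 0;
}

int main(void)
{
    MYVM vm = { .idTimerCpu = 0 };
    mySignalTimerFF(&vm);
    printf("cpu0 pending: %d, cpu1 pending: %d\n",
           myCpuHasTimerWork(&vm, 0), myCpuHasTimerWork(&vm, 1));
    return 0;   /* prints "cpu0 pending: 1, cpu1 pending: 0" */
}

Per the comments in the diff, the 500000000 constant gives those other EMTs a long poll interval (500 ms wall clock in TMTimerPoll, 500 million GIP ticks in TMTimerPollGIP), so only the dedicated timer EMT is woken promptly when a timer expires.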