Changeset 2984 in vbox
- Timestamp:
- Jun 1, 2007 4:18:07 PM (18 years ago)
- svn:sync-xref-src-repo-rev:
- 21704
- Location:
- trunk/src/VBox/VMM
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VM.cpp
r2981 r2984 469 469 * Init all R3 components, the order here might be important. 470 470 */ 471 rc = vmR3SetHaltMethod(pVM, VMHALTMETHOD_DEFAULT); 472 AssertRCReturn(rc, rc); 473 471 474 rc = MMR3Init(pVM); 472 475 if (VBOX_SUCCESS(rc)) -
trunk/src/VBox/VMM/VMEmt.cpp
r2981 r2984 38 38 #include <iprt/asm.h> 39 39 #include <iprt/semaphore.h> 40 #include <iprt/string.h> 40 41 #include <iprt/thread.h> 41 42 #include <iprt/time.h> 43 42 44 43 45 … … 166 168 } 167 169 170 168 171 /** 169 172 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does. … … 256 259 } 257 260 258 /**259 * Notify the emulation thread (EMT) about pending Forced Action (FF).260 *261 * This function is called by thread other than EMT to make262 * sure EMT wakes up and promptly service an FF request.263 *264 * @param pVM VM handle.265 * @param fNotifiedREM Set if REM have already been notified. If clear the266 * generic REMR3NotifyFF() method is called.267 */268 VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)269 {270 LogFlow(("VMR3NotifyFF:\n"));271 if (pVM->vm.s.fWait)272 {273 int rc = RTSemEventSignal(pVM->vm.s.EventSemWait);274 AssertRC(rc);275 }276 else if (!fNotifiedREM)277 REMR3NotifyFF(pVM);278 }279 280 261 281 262 /** 282 263 * The old halt loop. 283 264 */ 284 DECLCALLBACK(int) vmR3WaitHaltedOld(PVM pVM, const uint32_t fMask)265 static DECLCALLBACK(int) vmR3HaltOldDoHalt(PVM pVM, const uint32_t fMask, uint64_t /* u64Now*/) 285 266 { 286 267 /* … … 363 344 364 345 /** 346 * Initialize the configuration of halt method 1 & 2. 347 * 348 * @return VBox status code. Failure on invalid CFGM data. 349 * @param pVM The VM handle. 350 */ 351 static int vmR3HaltMethod12ReadConfig(PVM pVM) 352 { 353 /* 354 * The defaults. 355 */ 356 pVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4; 357 pVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000; 358 pVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000; 359 pVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000; 360 pVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000; 361 362 /* 363 * Query overrides. 
364 */ 365 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/VMM/HaltedMethod1"); 366 if (pCfg) 367 { 368 369 } 370 371 return VINF_SUCCESS; 372 } 373 374 375 /** 376 * Initialize halt method 1. 377 * 378 * @return VBox status code. 379 * @param pVM The VM handle. 380 */ 381 static DECLCALLBACK(int) vmR3HaltMethod1Init(PVM pVM) 382 { 383 return vmR3HaltMethod12ReadConfig(pVM); 384 } 385 386 387 /** 365 388 * Method 1 - Block whenever possible, and when lagging behind 366 389 * switch to spinning for 10-30ms with occational blocking until 367 390 * the lag has been eliminated. 368 391 */ 369 DECLCALLBACK(int) vmR3WaitHaltedMethod1(PVM pVM, const uint32_t fMask, uint64_t u64Now)370 { 371 /* 372 * To simplify things, we decide up-front whether we should switch 373 * to spinning or not. This makes some assumptions about the cause374 * of the spinning (PIT/RTC/PCNet) and that it will generate interrupts375 * or other events that means we should exitthe halt loop.392 static DECLCALLBACK(int) vmR3HaltMethod1DoHalt(PVM pVM, const uint32_t fMask, uint64_t u64Now) 393 { 394 /* 395 * To simplify things, we decide up-front whether we should switch to spinning or 396 * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet) 397 * and that it will generate interrupts or other events that will cause us to exit 398 * the halt loop. 
376 399 */ 377 400 bool fBlockOnce = false; … … 382 405 if (pVM->vm.s.Halt.Method12.u64StartSpinTS) 383 406 { 384 fSpinning = TMVirtualSyncGetLag(pVM) >= 2*1000000;407 fSpinning = TMVirtualSyncGetLag(pVM) >= pVM->vm.s.Halt.Method12.u32StopSpinningCfg; 385 408 if (fSpinning) 386 409 { 387 410 uint64_t u64Lag = TMVirtualSyncGetLag(pVM); 388 411 fBlockOnce = u64Now - pVM->vm.s.Halt.Method12.u64LastBlockTS 389 > RT_MAX(5*1000000, RT_MIN(u64Lag / 4, 200*1000000)); 412 > RT_MAX(pVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg, 413 RT_MIN(u64Lag / pVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg, 414 pVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg)); 390 415 } 391 416 else 392 417 { 393 RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);418 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000); 394 419 pVM->vm.s.Halt.Method12.u64StartSpinTS = 0; 395 420 } … … 397 422 else 398 423 { 399 fSpinning = TMVirtualSyncGetLag(pVM) >= 20*1000000;424 fSpinning = TMVirtualSyncGetLag(pVM) >= pVM->vm.s.Halt.Method12.u32StartSpinningCfg; 400 425 if (fSpinning) 401 426 pVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now; … … 404 429 else if (pVM->vm.s.Halt.Method12.u64StartSpinTS) 405 430 { 406 RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);431 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000); 407 432 pVM->vm.s.Halt.Method12.u64StartSpinTS = 0; 408 433 } … … 450 475 else 451 476 cMilliSecs -= pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg; 452 RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);477 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS); 453 478 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a); 454 479 rc = 
RTSemEventWait(pVM->vm.s.EventSemWait, cMilliSecs); … … 483 508 } 484 509 } 485 RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");510 //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : ""); 486 511 487 512 /* … … 493 518 } 494 519 } 495 if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);520 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct); 496 521 497 522 return rc; 523 } 524 525 526 /** 527 * Default VMR3Wait() worker. 528 * 529 * @returns VBox status code. 530 * @param pVM The VM handle. 531 */ 532 static DECLCALLBACK(int) vmR3DefaultWait(PVM pVM) 533 { 534 int rc = VINF_SUCCESS; 535 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1); 536 for (;;) 537 { 538 /* 539 * Check Relevant FFs. 540 */ 541 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)) 542 break; 543 544 /* 545 * Wait for a while. Someone will wake us up or interrupt the call if 546 * anything needs our attention. 547 */ 548 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1000); 549 if (rc == VERR_TIMEOUT) 550 rc = VINF_SUCCESS; 551 else if (VBOX_FAILURE(rc)) 552 { 553 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc)); 554 VM_FF_SET(pVM, VM_FF_TERMINATE); 555 rc = VERR_INTERNAL_ERROR; 556 break; 557 } 558 559 } 560 ASMAtomicXchgU32(&pVM->vm.s.fWait, 0); 561 return rc; 562 } 563 564 565 /** 566 * Default VMR3NotifyFF() worker. 567 * 568 * @param pVM The VM handle. 569 * @param fNotifiedREM Se VMR3NotifyFF(). 
570 */ 571 static DECLCALLBACK(void) vmR3DefaultNotifyFF(PVM pVM, bool fNotifiedREM) 572 { 573 if (pVM->vm.s.fWait) 574 { 575 int rc = RTSemEventSignal(pVM->vm.s.EventSemWait); 576 AssertRC(rc); 577 } 578 else if (!fNotifiedREM) 579 REMR3NotifyFF(pVM); 580 } 581 582 583 /** 584 * Array with halt method descriptors. 585 * VMINT::iHaltMethod contains an index into this array. 586 */ 587 static const struct VMHALTMETHODDESC 588 { 589 /** The halt method id. */ 590 VMHALTMETHOD enmHaltMethod; 591 /** The init function for loading config and initialize variables. */ 592 DECLR3CALLBACKMEMBER(int, pfnInit,(PVM pVM)); 593 /** The term function. */ 594 DECLR3CALLBACKMEMBER(void, pfnTerm,(PVM pVM)); 595 /** The halt function. */ 596 DECLR3CALLBACKMEMBER(int, pfnHalt,(PVM pVM, const uint32_t fMask, uint64_t u64Now)); 597 /** The wait function. */ 598 DECLR3CALLBACKMEMBER(int, pfnWait,(PVM pVM)); 599 /** The notifyFF function. */ 600 DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PVM pVM, bool fNotifiedREM)); 601 } g_aHaltMethods[] = 602 { 603 { VMHALTMETHOD_OLD, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyFF }, 604 { VMHALTMETHOD_1, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1DoHalt, vmR3DefaultWait, vmR3DefaultNotifyFF }, 605 //{ VMHALTMETHOD_2, vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoWait, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF }, 606 }; 607 608 609 /** 610 * Notify the emulation thread (EMT) about pending Forced Action (FF). 611 * 612 * This function is called by thread other than EMT to make 613 * sure EMT wakes up and promptly service an FF request. 614 * 615 * @param pVM VM handle. 616 * @param fNotifiedREM Set if REM have already been notified. If clear the 617 * generic REMR3NotifyFF() method is called. 618 */ 619 VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM) 620 { 621 LogFlow(("VMR3NotifyFF:\n")); 622 g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnNotifyFF(pVM, fNotifiedREM); 498 623 } 499 624 … … 555 680 * Do the halt. 
556 681 */ 557 #if 1 558 int rc = vmR3WaitHaltedOld(pVM, fMask); 559 #elif 0 /* work in progress */ 560 int rc = vmR3WaitHaltedMethod1(pVM, fMask, u64Now); 561 #else 562 # error "misconfigured halt" 563 #endif 682 int rc = g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnHalt(pVM, fMask, u64Now); 564 683 565 684 /* … … 597 716 } 598 717 599 int rc = VINF_SUCCESS; 600 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1); 601 for (;;) 602 { 603 /* 604 * Check Relevant FFs. 605 */ 606 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)) 607 break; 608 609 /* 610 * Wait for a while. Someone will wake us up or interrupt the call if 611 * anything needs our attention. 612 */ 613 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1000); 614 if (rc == VERR_TIMEOUT) 615 rc = VINF_SUCCESS; 616 else if (VBOX_FAILURE(rc)) 617 { 618 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc)); 619 VM_FF_SET(pVM, VM_FF_TERMINATE); 620 rc = VERR_INTERNAL_ERROR; 621 break; 622 } 623 624 } 625 ASMAtomicXchgU32(&pVM->vm.s.fWait, 0); 626 718 /* 719 * Do waiting according to the halt method (so VMR3NotifyFF 720 * doesn't have to special case anything). 721 */ 722 int rc = g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnWait(pVM); 627 723 LogFlow(("VMR3Wait: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions)); 628 724 return rc; 629 725 } 630 726 727 728 /** 729 * Changes the halt method. 730 * 731 * @returns VBox status code. 732 * @param pVM The VM handle. 733 * @param enmHaltMethod The new halt method. 734 * @thread EMT. 735 */ 736 int vmR3SetHaltMethod(PVM pVM, VMHALTMETHOD enmHaltMethod) 737 { 738 VM_ASSERT_EMT(pVM); 739 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER); 740 741 /* 742 * Resolve default (can be overridden in the configuration). 
743 */ 744 if (enmHaltMethod == VMHALTMETHOD_DEFAULT) 745 { 746 uint32_t u32; 747 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32); 748 if (VBOX_SUCCESS(rc)) 749 { 750 enmHaltMethod = (VMHALTMETHOD)u32; 751 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END) 752 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d."), enmHaltMethod); 753 } 754 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND) 755 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t.")); 756 else 757 enmHaltMethod = VMHALTMETHOD_OLD; 758 //enmHaltMethod = VMHALTMETHOD_1; 759 } 760 761 /* 762 * Find the descriptor. 763 */ 764 unsigned i = 0; 765 while ( i < RT_ELEMENTS(g_aHaltMethods) 766 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod) 767 i++; 768 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER); 769 770 /* 771 * Terminate the old one. 772 */ 773 if ( pVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID 774 && g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnTerm) 775 { 776 g_aHaltMethods[pVM->vm.s.iHaltMethod].pfnTerm(pVM); 777 pVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID; 778 } 779 780 /* 781 * Init the new one. 782 */ 783 memset(&pVM->vm.s.Halt, 0, sizeof(pVM->vm.s.Halt)); 784 if (g_aHaltMethods[i].pfnInit) 785 { 786 int rc = g_aHaltMethods[i].pfnInit(pVM); 787 AssertRCReturn(rc, rc); 788 } 789 pVM->vm.s.enmHaltMethod = enmHaltMethod; 790 ASMAtomicXchgU32(&pVM->vm.s.iHaltMethod, i); 791 return VINF_SUCCESS; 792 } 793 -
trunk/src/VBox/VMM/VMInternal.h
r2981 r2984 191 191 } VMRUNTIMEERROR, *PVMRUNTIMEERROR; 192 192 193 /** The halt method. */ 194 typedef enum 195 { 196 /** The usual invalid value. */ 197 VMHALTMETHOD_INVALID = 0, 198 /** Use the default method. */ 199 VMHALTMETHOD_DEFAULT, 200 /** The old spin/yield/block method. */ 201 VMHALTMETHOD_OLD, 202 /** The first go at a block/spin method. */ 203 VMHALTMETHOD_1, 204 /** The end of valid methods. (not inclusive of course) */ 205 VMHALTMETHOD_END, 206 /** The usual 32-bit max value. */ 207 VMHALTMETHOD_32BIT_HACK = 0x7fffffff 208 } VMHALTMETHOD; 209 193 210 194 211 /** … … 259 276 bool fPreventSaveState; 260 277 261 /** vmR3EmulationThread longjmp buffer 278 /** vmR3EmulationThread longjmp buffer 262 279 * @todo r=bird: requires union with padding. See EMInternal.h. */ 263 280 jmp_buf emtJumpEnv; 264 281 265 /** @name Yield282 /** @name Generic Halt data 266 283 * @{ 267 284 */ 285 /** The current halt method. 286 * Can be selected by CFGM option 'VM/HaltMethod'. */ 287 VMHALTMETHOD enmHaltMethod; 288 /** The index into g_aHaltMethods of the current halt method. */ 289 uint32_t volatile iHaltMethod; 268 290 /** The average time (ns) between two halts in the last second. (updated once per second) */ 269 291 uint32_t HaltInterval; … … 272 294 /** The number of halts in the current period. */ 273 295 uint32_t cHalts; 274 uint32_t padding 0;/**< alignment padding. */296 uint32_t padding; /**< alignment padding. */ 275 297 /** When we started counting halts in cHalts (RTTimeNanoTS). */ 276 298 uint64_t u64HaltsStartTS; 299 /** @} */ 277 300 278 301 /** Union containing data and config for the different halt algorithms. */ 279 union 302 union 280 303 { 281 /** 282 * Method 1 & 2 - Block whenever possible, and when lagging behind 283 * switch to spinning for 10-30ms with occational blocking until 284 * the lag has been eliminated. 
285 * 286 * The difference between 1 and 2 is that we use native absolute 304 /** 305 * Method 1 & 2 - Block whenever possible, and when lagging behind 306 * switch to spinning with regular blocking every 5-200ms (defaults) 307 * depending on the accumulated lag. The blocking interval is adjusted 308 * with the average oversleeping of the last 64 times. 309 * 310 * The difference between 1 and 2 is that we use native absolute 287 311 * time APIs for the blocking instead of the millisecond based IPRT 288 312 * interface. 289 313 */ 290 struct 291 { 292 /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */ 293 uint32_t cBlocks; 294 /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */ 295 uint64_t cNSBlockedTooLongAvg; 296 /** Total time spend oversleeping when blocking. */ 297 uint64_t cNSBlockedTooLong; 298 /** Total time spent blocking. */ 299 uint64_t cNSBlocked; 300 /** The timestamp (RTTimeNanoTS) of the last block. */ 301 uint64_t u64LastBlockTS; 302 303 /** When we started spinning relentlessly in order to catch up some of the oversleeping. 304 * This is 0 when we're not spinning. */ 305 uint64_t u64StartSpinTS; 306 } Method12; 314 struct 315 { 316 /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */ 317 uint32_t cBlocks; 318 /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */ 319 uint64_t cNSBlockedTooLongAvg; 320 /** Total time spend oversleeping when blocking. */ 321 uint64_t cNSBlockedTooLong; 322 /** Total time spent blocking. */ 323 uint64_t cNSBlocked; 324 /** The timestamp (RTTimeNanoTS) of the last block. */ 325 uint64_t u64LastBlockTS; 326 327 /** When we started spinning relentlessly in order to catch up some of the oversleeping. 328 * This is 0 when we're not spinning. */ 329 uint64_t u64StartSpinTS; 330 331 /** The max interval without blocking (when spinning). 
*/ 332 uint32_t u32MinBlockIntervalCfg; 333 /** The minimum interval between blocking (when spinning). */ 334 uint32_t u32MaxBlockIntervalCfg; 335 /** The value to divide the current lag by to get the raw blocking interval (when spinning). */ 336 uint32_t u32LagBlockIntervalDivisorCfg; 337 /** When to start spinning (lag / nano secs). */ 338 uint32_t u32StartSpinningCfg; 339 /** When to stop spinning (lag / nano secs). */ 340 uint32_t u32StopSpinningCfg; 341 } Method12; 307 342 308 343 #if 0 309 /** 344 /** 310 345 * Method 3 & 4 - Same as method 1 & 2 respectivly, except that we 311 346 * sprinkle it with yields. … … 374 409 375 410 DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg); 411 int vmR3SetHaltMethod(PVM pVM, VMHALTMETHOD enmHaltMethod); 376 412 DECLCALLBACK(int) vmR3Destroy(PVM pVM); 377 413 DECLCALLBACK(void) vmR3SetErrorV(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
Note: See TracChangeset for help on using the changeset viewer.