Timestamp: Sep 28, 2009 5:24:02 PM
svn:sync-xref-src-repo-rev: 52923
Location: trunk/src/VBox/VMM
Files: 5 edited
trunk/src/VBox/VMM/PGM.cpp
r23307 → r23393:

      STAM_REL_REG(pVM, &pPGM->cSharedPages,    STAMTYPE_U32,     "/PGM/Page/cSharedPages",    STAMUNIT_OCCURENCES, "The number of shared pages.");
      STAM_REL_REG(pVM, &pPGM->cZeroPages,      STAMTYPE_U32,     "/PGM/Page/cZeroPages",      STAMUNIT_OCCURENCES, "The number of zero backed pages.");
+     STAM_REL_REG(pVM, &pPGM->cMonitoredPages, STAMTYPE_U32,     "/PGM/Page/cMonitoredPages", STAMUNIT_OCCURENCES, "The number of write monitored pages.");
+     STAM_REL_REG(pVM, &pPGM->cWrittenToPages, STAMTYPE_U32,     "/PGM/Page/cWrittenToPages", STAMUNIT_OCCURENCES, "The number of previously write monitored pages that have been written to.");
      STAM_REL_REG(pVM, &pPGM->cHandyPages,     STAMTYPE_U32,     "/PGM/Page/cHandyPages",     STAMUNIT_OCCURENCES, "The number of handy pages (not included in cAllPages).");
      STAM_REL_REG(pVM, &pPGM->cRelocations,    STAMTYPE_COUNTER, "/PGM/cRelocations",         STAMUNIT_OCCURENCES, "Number of hypervisor relocations.");
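The two samples added above expose plain uint32_t fields in the PGM instance data; the matching declarations land in the PGMInternal.h hunk below. An abridged sketch of that relationship (everything else in the struct elided):

    /* The STAMTYPE_U32 samples registered above point straight at these
     * PGM members, which the VMM bumps inline rather than through STAM
     * macros (see the PGMAllPhys.cpp hunk at the end of this changeset). */
    typedef struct PGM
    {
        /* ... many members elided ... */
        uint32_t cMonitoredPages;   /**< The number of write monitored pages. */
        uint32_t cWrittenToPages;   /**< The number of previously write monitored pages. */
        /* ... */
    } PGM;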
trunk/src/VBox/VMM/PGMInternal.h
r23372 → r23393:

      /** Was a ZERO page last time around. */
      uint32_t    fZero : 1;
+     /** Was a SHARED page last time around. */
+     uint32_t    fShared : 1;
+     /** Whether the page is/was write monitored in a previous pass. */
+     uint32_t    fWriteMonitored : 1;
+     /** Whether the page is/was write monitored earlier in this pass. */
+     uint32_t    fWriteMonitoredJustNow : 1;
      /** Bits reserved for future use. */
-     uint32_t    u5Reserved : 5;
+     uint32_t    u2Reserved : 2;
  } PGMLIVESAVEPAGE;
  AssertCompileSize(PGMLIVESAVEPAGE, 8);
  /** Pointer to the per page live save tracking data. */
  typedef PGMLIVESAVEPAGE *PPGMLIVESAVEPAGE;
+
+ /** The max value of PGMLIVESAVEPAGE::cDirtied. */
+ #define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
…
      /** The number of ready pages. */
      uint32_t                cReadyPages;
-     /** The number of dirty pages. */
+     /** The number of dirty pages. (Not counting MMIO and MMIO2 pages.) */
      uint32_t                cDirtyPages;
      /** The number of MMIO and MMIO2 pages. */
      uint32_t                cMmioPages;
-     uint32_t                u32;
+     /** The number of monitored pages. */
+     uint32_t                cMonitoredPages;
  } LiveSave;
…
      uint32_t                cSharedPages;    /**< The number of shared pages. */
      uint32_t                cZeroPages;      /**< The number of zero backed pages. */
+     uint32_t                cMonitoredPages; /**< The number of write monitored pages. */
+     uint32_t                cWrittenToPages; /**< The number of previously write monitored pages. */

      /** The number of times we were forced to change the hypervisor region location. */
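Putting this hunk together with the fields the new save code references, the complete per-page tracking structure presumably looks as follows after the change. The field order and the 24-bit width of cDirtied are inferred (from PGMLIVSAVEPAGE_MAX_DIRTIED and the 8-byte compile-time assertion), so treat this as a sketch rather than the exact header:

    /* Sketch of the full PGMLIVESAVEPAGE after this change; the bitfield
     * members must sum to 32 bits for AssertCompileSize(..., 8) to hold. */
    typedef struct PGMLIVESAVEPAGE
    {
        /** The pass in which the page was last saved. */
        uint32_t    uPassSaved;
        /** Number of times the page has been dirtied (saturated at
         *  PGMLIVSAVEPAGE_MAX_DIRTIED; inferred width: 24 bits). */
        uint32_t    cDirtied : 24;
        /** Whether the page is currently dirty. */
        uint32_t    fDirty : 1;
        /** MMIO/MMIO2 page, not saved by the live passes. */
        uint32_t    fMmio : 1;
        /** Was a ZERO page last time around. */
        uint32_t    fZero : 1;
        /** Was a SHARED page last time around. */
        uint32_t    fShared : 1;
        /** Whether the page is/was write monitored in a previous pass. */
        uint32_t    fWriteMonitored : 1;
        /** Whether the page is/was write monitored earlier in this pass. */
        uint32_t    fWriteMonitoredJustNow : 1;
        /** Bits reserved for future use. */
        uint32_t    u2Reserved : 2;
    } PGMLIVESAVEPAGE;
    AssertCompileSize(PGMLIVESAVEPAGE, 8);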
trunk/src/VBox/VMM/PGMPhys.cpp
r23306 → r23393:

          rc = VERR_PGM_PHYS_PAGE_RESERVED;
      }
-     else
-     if (    PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
+     else if (   PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
  #ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
-         || pgmPoolIsDirtyPage(pVM, *pGCPhys)
+              || pgmPoolIsDirtyPage(pVM, *pGCPhys)
  #endif
-         )
+             )
      {
          /* We *must* flush any corresponding pgm pool page here, otherwise we'll
trunk/src/VBox/VMM/PGMSavedState.cpp
r23307 → r23393:

  *   Defined Constants And Macros                                               *
  *******************************************************************************/
- /** Saved state data unit version for 2.5.x and later. */
- #define PGM_SAVED_STATE_VERSION                 9
+ /** Saved state data unit version. */
+ #ifdef VBOX_WITH_LIVE_MIGRATION
+ # define PGM_SAVED_STATE_VERSION                10
+ #else
+ # define PGM_SAVED_STATE_VERSION                9
+ #endif
+ /** Saved state data unit version for 3.0. (pre live migration) */
+ #define PGM_SAVED_STATE_VERSION_3_0_0           9
  /** Saved state data unit version for 2.2.2 and later. */
  #define PGM_SAVED_STATE_VERSION_2_2_2           8
…
  /** Saved state data unit version. */
  #define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6
+
+
+ /** @name Sparse state record types
+  * @{ */
+ /** Zero page. No data. */
+ #define PGM_STATE_REC_ZERO          UINT8_C(0x00)
+ /** Raw page. */
+ #define PGM_STATE_REC_RAW           UINT8_C(0x01)
+ /** Shadowed ROM page. */
+ #define PGM_STATE_REC_SHADOWED_ROM  UINT8_C(0x02)
+ /** The last record type. */
+ #define PGM_STATE_REC_LAST          PGM_STATE_REC_SHADOWED_ROM
+ /** End marker. */
+ #define PGM_STATE_REC_END           UINT8_C(0xff)
+ /** Flag indicating that the data is preceeded by an RTGCPHYS containing the
+  *  page address. If not set, the page follows the immediately after the
+  *  previous one. */
+ #define PGM_STATE_REC_FLAG_ADDR     UINT8_C(0x80)
+ /** @} */
…
          && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
      {
-         uint32_t const  idRamRangeGen = pVM->pgm.s.idRamRangesGen;
+         uint32_t const  idRamRangesGen = pVM->pgm.s.idRamRangesGen;
          uint32_t const  cPages = pCur->cb >> PAGE_SHIFT;
          pgmUnlock(pVM);
…
              return VERR_NO_MEMORY;
          pgmLock(pVM);
-         if (pVM->pgm.s.idRamRangesGen != idRamRangeGen)
+         if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
          {
              pgmUnlock(pVM);
…
          {
              PCPGMPAGE pPage = &pCur->aPages[iPage];
-             paLSPages[iPage].uPassSaved = UINT32_MAX;
-             paLSPages[iPage].cDirtied   = 0;
-             paLSPages[iPage].u5Reserved = 0;
+             paLSPages[iPage].uPassSaved             = UINT32_MAX;
+             paLSPages[iPage].cDirtied               = 0;
+             paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
+             paLSPages[iPage].fWriteMonitored        = 0;
+             paLSPages[iPage].fWriteMonitoredJustNow = 0;
+             paLSPages[iPage].u2Reserved             = 0;
              switch (PGM_PAGE_GET_TYPE(pPage))
              {
…
                      if (PGM_PAGE_IS_ZERO(pPage))
                      {
-                         paLSPages[iPage].fZero  = 1;
-                         paLSPages[iPage].fDirty = 0;
-                         pVM->pgm.s.LiveSave.cReadyPages++;
+                         paLSPages[iPage].fZero   = 1;
+                         paLSPages[iPage].fShared = 0;
+                     }
+                     else if (PGM_PAGE_IS_SHARED(pPage))
+                     {
+                         paLSPages[iPage].fZero   = 0;
+                         paLSPages[iPage].fShared = 1;
                      }
                      else
                      {
-                         paLSPages[iPage].fZero  = 0;
-                         paLSPages[iPage].fDirty = 1;
-                         pVM->pgm.s.LiveSave.cDirtyPages++;
+                         paLSPages[iPage].fZero   = 0;
+                         paLSPages[iPage].fShared = 0;
                      }
                      paLSPages[iPage].fMmio = 0;
+                     pVM->pgm.s.LiveSave.cDirtyPages++;
                      break;
+
                  default:
                      AssertMsgFailed(("%R[pgmpage]", pPage));
…
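The record types introduced at the top of this file define a simple sparse page stream. A condensed sketch of the decode side, lifted from the pgmR3LoadLockedMemory function added further down (error checks trimmed):

    /* Each record is one type byte; if PGM_STATE_REC_FLAG_ADDR is OR'ed in,
     * an explicit RTGCPHYS page address follows, otherwise the record applies
     * to the page immediately after the previous one. */
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
    for (;;)
    {
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
            return VINF_SUCCESS;                /* end of the page stream */

        RTGCPHYS GCPhys;
        if (u8 & PGM_STATE_REC_FLAG_ADDR)
            rc = SSMR3GetGCPhys(pSSM, &GCPhys); /* explicit address record */
        else
            GCPhys = GCPhysLast + PAGE_SIZE;    /* implicit: the next page */

        /* ... dispatch on u8 & ~PGM_STATE_REC_FLAG_ADDR ... */
        GCPhysLast = GCPhys;
    }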
+ /**
+  * pgmR3LiveExec part 1: Scan for page modifications and reprotect them.
+  *
+  * Note! Since we don't care about MMIO or MMIO2 pages and since we don't
+  *       have any movable ROMs yet, we can safely yield the PGM when we
+  *       detect contention.
+  *
+  *       This holds true for part 2 as well.
+  *
+  * @param   pVM     The VM handle.
+  */
+ static void pgmR3LiveExecPart1(PVM pVM)
+ {
+     RTGCPHYS        GCPhysCur = 0;
+     PPGMRAMRANGE    pCur;
+     pgmLock(pVM);
+     do
+     {
+         uint32_t const  idRamRangesGen = pVM->pgm.s.idRamRangesGen;
+         uint32_t        cSinceYield    = 0;
+         for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
+         {
+             if (pCur->GCPhysLast > GCPhysCur)
+             {
+                 PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
+                 uint32_t         cPages    = pCur->cb >> PAGE_SHIFT;
+                 uint32_t         iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
+                 GCPhysCur = 0;
+                 for (; iPage < cPages; iPage++, cSinceYield++)
+                 {
+                     /* Do yield first. */
+                     if (   (cSinceYield & 0x7ff) == 0x7ff
+                         && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
+                         && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
+                     {
+                         GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+                         break; /* restart */
+                     }
+
+                     /* Process the page. */
+                     if (!paLSPages[iPage].fMmio)
+                     {
+                         switch (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]))
+                         {
+                             case PGMPAGETYPE_RAM:
+                             case PGMPAGETYPE_ROM_SHADOW: /** @todo ROM shadowing needs checking out later. (ignoring it for now) */
+                             case PGMPAGETYPE_ROM:
+                             {
+                                 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
+                                 {
+                                     case PGM_PAGE_STATE_ALLOCATED:
+                                         /** @todo Optimize this: Don't always re-enable write
+                                          * monitoring if the page is known to be very busy. */
+                                         if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
+                                         {
+                                             Assert(paLSPages[iPage].fWriteMonitored);
+                                             PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
+                                             Assert(pVM->pgm.s.cWrittenToPages > 0);
+                                             pVM->pgm.s.cWrittenToPages--;
+                                         }
+                                         else
+                                         {
+                                             Assert(!paLSPages[iPage].fWriteMonitored);
+                                             pVM->pgm.s.LiveSave.cMonitoredPages++;
+                                         }
+
+                                         if (!paLSPages[iPage].fDirty)
+                                         {
+                                             pVM->pgm.s.LiveSave.cDirtyPages++;
+                                             pVM->pgm.s.LiveSave.cReadyPages--;
+                                             if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
+                                                 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
+                                         }
+
+                                         PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
+                                         pVM->pgm.s.cMonitoredPages++;
+                                         paLSPages[iPage].fWriteMonitored        = 1;
+                                         paLSPages[iPage].fWriteMonitoredJustNow = 1;
+                                         paLSPages[iPage].fDirty                 = 1;
+                                         paLSPages[iPage].fZero                  = 0;
+                                         paLSPages[iPage].fShared                = 0;
+                                         break;
+
+                                     case PGM_PAGE_STATE_WRITE_MONITORED:
+                                         Assert(paLSPages[iPage].fWriteMonitored);
+                                         paLSPages[iPage].fWriteMonitoredJustNow = 0;
+                                         break;
+
+                                     case PGM_PAGE_STATE_ZERO:
+                                         if (!paLSPages[iPage].fZero)
+                                         {
+                                             paLSPages[iPage].fZero   = 1;
+                                             paLSPages[iPage].fShared = 0;
+                                             if (!paLSPages[iPage].fDirty)
+                                             {
+                                                 paLSPages[iPage].fDirty = 1;
+                                                 pVM->pgm.s.LiveSave.cReadyPages--;
+                                                 pVM->pgm.s.LiveSave.cDirtyPages++;
+                                             }
+                                         }
+                                         break;
+
+                                     case PGM_PAGE_STATE_SHARED:
+                                         if (!paLSPages[iPage].fShared)
+                                         {
+                                             paLSPages[iPage].fZero   = 0;
+                                             paLSPages[iPage].fShared = 1;
+                                             if (!paLSPages[iPage].fDirty)
+                                             {
+                                                 paLSPages[iPage].fDirty = 1;
+                                                 pVM->pgm.s.LiveSave.cReadyPages--;
+                                                 pVM->pgm.s.LiveSave.cDirtyPages++;
+                                             }
+                                         }
+                                         break;
+                                 }
+                                 break;
+                             }
+
+                             default:
+                                 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage]));
+                             case PGMPAGETYPE_MMIO2:
+                             case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
+                             case PGMPAGETYPE_MMIO:
+                                 if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_MMIO)
+                                 {
+                                     paLSPages[iPage].fZero  = 0;
+                                     paLSPages[iPage].fDirty = 1;
+                                     paLSPages[iPage].fMmio  = 1;
+                                 }
+                                 else
+                                 {
+                                     paLSPages[iPage].fZero  = 1;
+                                     paLSPages[iPage].fDirty = 1;
+                                     paLSPages[iPage].fMmio  = 1;
+                                 }
+                                 if (paLSPages[iPage].fWriteMonitored)
+                                 {
+                                     if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
+                                     {
+                                         AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
+                                         PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
+                                         Assert(pVM->pgm.s.cMonitoredPages > 0);
+                                         pVM->pgm.s.cMonitoredPages--;
+                                     }
+                                     if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
+                                     {
+                                         PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
+                                         Assert(pVM->pgm.s.cWrittenToPages > 0);
+                                         pVM->pgm.s.cWrittenToPages--;
+                                     }
+                                     pVM->pgm.s.LiveSave.cMonitoredPages--;
+                                 }
+                                 pVM->pgm.s.LiveSave.cMmioPages++;
+                                 break;
+                         }
+                     }
+                 } /* for each page in range */
+
+                 if (GCPhysCur != 0)
+                     break; /* Yield + ramrange change */
+                 GCPhysCur = pCur->GCPhysLast;
+             }
+         } /* for each range */
+     } while (pCur);
+     pgmUnlock(pVM);
+ }
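Part 1 is the dirty-tracking half of a pre-copy loop: every allocated page is (re)armed with write monitoring, and a page caught with its WRITTEN_TO flag set goes back on the dirty list. A condensed sketch of the decision for one RAM page, where pPage and pLSPage are illustrative shorthands for &pCur->aPages[iPage] and &paLSPages[iPage]:

    /* Condensed per-page logic of part 1 (yielding, counters and the
     * MMIO/zero/shared cases omitted): */
    switch (PGM_PAGE_GET_STATE(pPage))
    {
        case PGM_PAGE_STATE_ALLOCATED:           /* written to, or never armed */
            if (PGM_PAGE_IS_WRITTEN_TO(pPage))
                PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
            if (!pLSPage->fDirty)
                pLSPage->cDirtied++;             /* back on the dirty list */
            PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_WRITE_MONITORED);
            pLSPage->fWriteMonitoredJustNow = 1; /* too hot to save this pass */
            pLSPage->fDirty = 1;
            break;

        case PGM_PAGE_STATE_WRITE_MONITORED:     /* untouched since last pass */
            pLSPage->fWriteMonitoredJustNow = 0; /* quiescent: part 2 may save it */
            break;
    }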
+ /**
+  * pgmR3LiveExec part 2: Save quiescent pages.
+  *
+  * @returns VBox status code.
+  * @param   pVM     The VM handle.
+  * @param   pSSM    The SSM handle.
+  * @param   uPass   The pass.
+  */
+ static int pgmR3LiveExecPart2(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+ {
+     RTGCPHYS        GCPhysLast = NIL_RTGCPHYS;
+     RTGCPHYS        GCPhysCur  = 0;
+     PPGMRAMRANGE    pCur;
+     pgmLock(pVM);
+     do
+     {
+         uint32_t const  idRamRangesGen = pVM->pgm.s.idRamRangesGen;
+         uint32_t        cSinceYield    = 0;
+         for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
+         {
+             if (pCur->GCPhysLast > GCPhysCur)
+             {
+                 PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
+                 uint32_t         cPages    = pCur->cb >> PAGE_SHIFT;
+                 uint32_t         iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
+                 GCPhysCur = 0;
+                 for (; iPage < cPages; iPage++, cSinceYield++)
+                 {
+                     /* Do yield first. */
+                     if (   (cSinceYield & 0x7ff) == 0x7ff
+                         && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
+                         && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
+                     {
+                         GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+                         break; /* restart */
+                     }
+
+                     /*
+                      * Save dirty pages that hasn't changed since part 1.
+                      * (Use if instead of switch here so we can easily break out of the loop.)
+                      */
+                     int rc;
+                     if (   paLSPages[iPage].fDirty
+                         && !paLSPages[iPage].fMmio
+                         && PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) != PGM_PAGE_STATE_ALLOCATED)
+                     {
+                         if (   !paLSPages[iPage].fZero
+                             && PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) != PGM_PAGE_STATE_ZERO)
+                         {
+                             AssertMsg(   PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM
+                                       || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM_SHADOW
+                                       || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM,
+                                       ("%R[pgmpage]", &pCur->aPages[iPage]));
+                             Assert(!paLSPages[iPage].fZero);
+                             if (!paLSPages[iPage].fWriteMonitoredJustNow)
+                             {
+                                 /*
+                                  * Copy the page and then save it outside the lock (since any
+                                  * SSM call may block).
+                                  */
+                                 char        abPage[PAGE_SIZE];
+                                 RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+                                 void const *pvPage;
+                                 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
+                                 if (RT_SUCCESS(rc))
+                                     memcpy(abPage, pvPage, PAGE_SIZE);
+                                 pgmUnlock(pVM);
+                                 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
+
+                                 if (GCPhys == GCPhysLast + PAGE_SIZE)
+                                     SSMR3PutU8(pSSM, PGM_STATE_REC_RAW);
+                                 else
+                                 {
+                                     SSMR3PutU8(pSSM, PGM_STATE_REC_RAW | PGM_STATE_REC_FLAG_ADDR);
+                                     SSMR3PutGCPhys(pSSM, GCPhys);
+                                 }
+                                 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
+                                 if (RT_FAILURE(rc))
+                                     return rc;
+
+                                 pgmLock(pVM);
+                                 GCPhysLast = GCPhys;
+                                 paLSPages[iPage].fDirty = 0;
+                                 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
+                                 {
+                                     GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
+                                     break; /* restart */
+                                 }
+                             }
+                         }
+                         else if (   paLSPages[iPage].fZero
+                                  && (   PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM
+                                      || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM_SHADOW
+                                      || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM))
+                         {
+                             /*
+                              * Dirty zero page.
+                              */
+                             RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
+                             pgmUnlock(pVM);
+
+                             if (GCPhys == GCPhysLast + PAGE_SIZE)
+                                 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAW);
+                             else
+                             {
+                                 SSMR3PutU8(pSSM, PGM_STATE_REC_RAW | PGM_STATE_REC_FLAG_ADDR);
+                                 rc = SSMR3PutGCPhys(pSSM, GCPhys);
+                             }
+                             if (RT_FAILURE(rc))
+                                 return rc;
+
+                             pgmLock(pVM);
+                             GCPhysLast = GCPhys;
+                             paLSPages[iPage].fDirty = 0;
+                             if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
+                             {
+                                 GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
+                                 break; /* restart */
+                             }
+                         }
+                     }
+                 } /* for each page in range */
+
+                 if (GCPhysCur != 0)
+                     break; /* Yield + ramrange change */
+                 GCPhysCur = pCur->GCPhysLast;
+             }
+         } /* for each range */
+     } while (pCur);
+     pgmUnlock(pVM);
+
+     return VINF_SUCCESS;
+ }
…
  /**
   * Execute a live save pass.
   *
…
  static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
  {
-     return VINF_SUCCESS;
+     pgmR3LiveExecPart1(pVM);
+     pgmR3PoolClearAll(pVM); /** @todo this could perhaps be optimized a bit. */
+     return pgmR3LiveExecPart2(pVM, pSSM, uPass);
  }
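Worth noting in part 2 is the lock discipline: the page content is copied into a stack buffer while holding the PGM lock, the potentially blocking SSM writes happen outside it, and on re-entry the ram-range generation ID tells the loop whether it must restart. The skeleton of that pattern (a sketch; pvPage is assumed to have been mapped read-only beforehand, rc handling trimmed):

    char abPage[PAGE_SIZE];
    memcpy(abPage, pvPage, PAGE_SIZE);          /* under the PGM lock */
    pgmUnlock(pVM);                             /* SSM calls may block */
    rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
    pgmLock(pVM);
    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
    {
        /* the range list changed while unlocked: restart the scan */
    }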
…
+ /**
+  * Worker for pgmR3Load and pgmR3LoadLocked.
+  *
+  * @returns VBox status code.
+  *
+  * @param   pVM      The VM handle.
+  * @param   pSSM     The SSM handle.
+  * @param   uVersion The saved state version.
+  */
+ static int pgmR3LoadLockedMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
+ {
+     /*
+      * Process page records until we hit the terminator.
+      */
+     PPGMRAMRANGE    pRamHint   = NULL;
+     RTGCPHYS        GCPhysLast = NIL_RTGCPHYS;
+     for (;;)
+     {
+         /* Get the record type and flags. */
+         uint8_t u8;
+         int rc = SSMR3GetU8(pSSM, &u8);
+         if (RT_FAILURE(rc))
+             return rc;
+         if (u8 == PGM_STATE_REC_END)
+             return VINF_SUCCESS;
+         AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+
+         /* Get the address. */
+         RTGCPHYS GCPhys;
+         if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
+         {
+             AssertLogRelReturn(GCPhysLast != NIL_RTGCPHYS, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+             GCPhys = GCPhysLast + PAGE_SIZE;
+         }
+         else
+         {
+             rc = SSMR3GetGCPhys(pSSM, &GCPhys);
+             if (RT_FAILURE(rc))
+                 return rc;
+             AssertLogRelMsgReturn(GCPhys & PAGE_OFFSET_MASK, ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
+         }
+
+         /* Get the ram range and page. */
+         PPGMPAGE pPage;
+         rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
+         AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
+
+         /*
+          * Take action according to the record type.
+          */
+         switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
+         {
+             case PGM_STATE_REC_ZERO:
+                 if (!PGM_PAGE_IS_ZERO(pPage))
+                 {
+                     /* dispose of the page if possible */
+                 }
+                 break;
+
+             case PGM_STATE_REC_RAW:
+                 /* map the page for writing and load the bits onto it. */
+                 break;
+
+             case PGM_STATE_REC_SHADOWED_ROM:
+             {
+                 /* map the page for writing and load the bits onto it. */
+                 break;
+             }
+
+             default:
+                 AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
+         }
+     }
+ }
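The PGM_STATE_REC_ZERO and PGM_STATE_REC_RAW actions are still stubs in this revision; the comments mark where the work goes. A hedged sketch of what the raw case would plausibly do, reusing the writable counterpart of the mapping helper seen in pgmR3LiveExecPart2 (pgmPhysGCPhys2CCPtrInternal is assumed here, it is not part of this changeset):

    case PGM_STATE_REC_RAW:
    {
        /* Sketch only: map the page for writing and read the bits into it. */
        void *pvPage;
        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
        rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
        if (RT_FAILURE(rc))
            return rc;
        break;
    }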
…
  /**
   * Load an ignored page.
   *
…
  /**
-  * Worker for pgmR3Load.
+  * Ram range flags and bits for older versions of the saved state.
   *
   * @returns VBox status code.
   *
-  * @param   pVM      The VM handle.
+  * @param   pVM      The VM handle
   * @param   pSSM     The SSM handle.
   * @param   uVersion The saved state version.
   */
- static int pgmR3LoadLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
+ static int pgmR3LoadLockedMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
  {
      PPGM pPGM = &pVM->pgm.s;
-     int rc;
-     uint32_t u32Sep;
-
-     /*
-      * Load basic data (required / unaffected by relocation).
-      */
-     if (uVersion >= PGM_SAVED_STATE_VERSION)
-     {
-         rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
-         AssertLogRelRCReturn(rc, rc);
-
-         for (VMCPUID i = 0; i < pVM->cCpus; i++)
-         {
-             rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
-             AssertLogRelRCReturn(rc, rc);
-         }
-     }
-     else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
-     {
-         AssertRelease(pVM->cCpus == 1);
-
-         PGMOLD pgmOld;
-         rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
-         AssertLogRelRCReturn(rc, rc);
-
-         pPGM->fMappingsFixed    = pgmOld.fMappingsFixed;
-         pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
-         pPGM->cbMappingFixed    = pgmOld.cbMappingFixed;
-
-         pVM->aCpus[0].pgm.s.fA20Enabled   = pgmOld.fA20Enabled;
-         pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
-         pVM->aCpus[0].pgm.s.enmGuestMode  = pgmOld.enmGuestMode;
-     }
-     else
-     {
-         AssertRelease(pVM->cCpus == 1);
-
-         SSMR3GetBool(pSSM,  &pPGM->fMappingsFixed);
-         SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
-         SSMR3GetU32(pSSM,   &pPGM->cbMappingFixed);
-
-         uint32_t cbRamSizeIgnored;
-         rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
-         if (RT_FAILURE(rc))
-             return rc;
-         SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
-
-         uint32_t u32 = 0;
-         SSMR3GetUInt(pSSM, &u32);
-         pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
-         SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
-         RTUINT uGuestMode;
-         SSMR3GetUInt(pSSM, &uGuestMode);
-         pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
-
-         /* check separator. */
-         SSMR3GetU32(pSSM, &u32Sep);
-         if (RT_FAILURE(rc))
-             return rc;
-         if (u32Sep != (uint32_t)~0)
-         {
-             AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
-             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
-         }
-     }
-
-     /*
-      * The guest mappings.
-      */
-     uint32_t i = 0;
-     for ( ;;i++)
-     {
-         /* Check the seqence number / separator. */
-         rc = SSMR3GetU32(pSSM, &u32Sep);
-         if (RT_FAILURE(rc))
-             return rc;
-         if (u32Sep == ~0U)
-             break;
-         if (u32Sep != i)
-         {
-             AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
-             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
-         }
-
-         /* get the mapping details. */
-         char szDesc[256];
-         szDesc[0] = '\0';
-         rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
-         if (RT_FAILURE(rc))
-             return rc;
-         RTGCPTR GCPtr;
-         SSMR3GetGCPtr(pSSM, &GCPtr);
-         RTGCPTR cPTs;
-         rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
-         if (RT_FAILURE(rc))
-             return rc;
-
-         /* find matching range. */
-         PPGMMAPPING pMapping;
-         for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
-             if (   pMapping->cPTs == cPTs
-                 && !strcmp(pMapping->pszDesc, szDesc))
-                 break;
-         AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
-                                          cPTs, szDesc, GCPtr),
-                               VERR_SSM_LOAD_CONFIG_MISMATCH);
-
-         /* relocate it. */
-         if (pMapping->GCPtr != GCPtr)
-         {
-             AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
-             pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
-         }
-         else
-             Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
-     }
-
-     /*
-      * Ram range flags and bits.
-      */
-     i = 0;
+
+     /*
+      * Ram range flags and bits.
+      */
+     uint32_t i = 0;
      for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
      {
          /* Check the seqence number / separator. */
-         rc = SSMR3GetU32(pSSM, &u32Sep);
+         uint32_t u32Sep;
+         int rc = SSMR3GetU32(pSSM, &u32Sep);
          if (RT_FAILURE(rc))
              return rc;
…
      }

-     return rc;
+     return VINF_SUCCESS;
+ }
+
+
+ /**
+  * Worker for pgmR3Load.
+  *
+  * @returns VBox status code.
+  *
+  * @param   pVM      The VM handle.
+  * @param   pSSM     The SSM handle.
+  * @param   uVersion The saved state version.
+  */
+ static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
+ {
+     PPGM pPGM = &pVM->pgm.s;
+     int rc;
+     uint32_t u32Sep;
+
+     /*
+      * Load basic data (required / unaffected by relocation).
+      */
+     if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
+     {
+         rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
+         AssertLogRelRCReturn(rc, rc);
+
+         for (VMCPUID i = 0; i < pVM->cCpus; i++)
+         {
+             rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
+             AssertLogRelRCReturn(rc, rc);
+         }
+     }
+     else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
+     {
+         AssertRelease(pVM->cCpus == 1);
+
+         PGMOLD pgmOld;
+         rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
+         AssertLogRelRCReturn(rc, rc);
+
+         pPGM->fMappingsFixed    = pgmOld.fMappingsFixed;
+         pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
+         pPGM->cbMappingFixed    = pgmOld.cbMappingFixed;
+
+         pVM->aCpus[0].pgm.s.fA20Enabled   = pgmOld.fA20Enabled;
+         pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
+         pVM->aCpus[0].pgm.s.enmGuestMode  = pgmOld.enmGuestMode;
+     }
+     else
+     {
+         AssertRelease(pVM->cCpus == 1);
+
+         SSMR3GetBool(pSSM,  &pPGM->fMappingsFixed);
+         SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
+         SSMR3GetU32(pSSM,   &pPGM->cbMappingFixed);
+
+         uint32_t cbRamSizeIgnored;
+         rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
+         if (RT_FAILURE(rc))
+             return rc;
+         SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
+
+         uint32_t u32 = 0;
+         SSMR3GetUInt(pSSM, &u32);
+         pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
+         SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
+         RTUINT uGuestMode;
+         SSMR3GetUInt(pSSM, &uGuestMode);
+         pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
+
+         /* check separator. */
+         SSMR3GetU32(pSSM, &u32Sep);
+         if (RT_FAILURE(rc))
+             return rc;
+         if (u32Sep != (uint32_t)~0)
+         {
+             AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
+             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+         }
+     }
+
+     /*
+      * The guest mappings.
+      */
+     uint32_t i = 0;
+     for (;; i++)
+     {
+         /* Check the seqence number / separator. */
+         rc = SSMR3GetU32(pSSM, &u32Sep);
+         if (RT_FAILURE(rc))
+             return rc;
+         if (u32Sep == ~0U)
+             break;
+         if (u32Sep != i)
+         {
+             AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
+             return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
+         }
+
+         /* get the mapping details. */
+         char szDesc[256];
+         szDesc[0] = '\0';
+         rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
+         if (RT_FAILURE(rc))
+             return rc;
+         RTGCPTR GCPtr;
+         SSMR3GetGCPtr(pSSM, &GCPtr);
+         RTGCPTR cPTs;
+         rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
+         if (RT_FAILURE(rc))
+             return rc;
+
+         /* find matching range. */
+         PPGMMAPPING pMapping;
+         for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
+             if (   pMapping->cPTs == cPTs
+                 && !strcmp(pMapping->pszDesc, szDesc))
+                 break;
+         AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
+                                          cPTs, szDesc, GCPtr),
+                               VERR_SSM_LOAD_CONFIG_MISMATCH);
+
+         /* relocate it. */
+         if (pMapping->GCPtr != GCPtr)
+         {
+             AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
+             pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
+         }
+         else
+             Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
+     }
+
+     /*
+      * Load the RAM contents.
+      */
+     if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
+         return pgmR3LoadLockedMemory(pVM, pSSM, SSM_PASS_FINAL);
+     return pgmR3LoadLockedMemoryOld(pVM, pSSM, uVersion);
+ }
+
+
+ /**
+  * Prepare state load operation.
+  *
+  * @returns VBox status code.
+  * @param   pVM     VM Handle.
+  * @param   pSSM    SSM operation handle.
+  */
+ static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
+ {
+     /*
+      * Call the reset function to make sure all the memory is cleared.
+      */
+     PGMR3Reset(pVM);
+     NOREF(pSSM);
+     return VINF_SUCCESS;
+ }
…
      /*
       * Validate version.
       */
-     if (   uVersion != PGM_SAVED_STATE_VERSION
-         && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
-         && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
-         && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
+     if (   (   uPass != SSM_PASS_FINAL
+             && uVersion != PGM_SAVED_STATE_VERSION)
+         || (   uVersion != PGM_SAVED_STATE_VERSION
+             && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
+             && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
+             && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
+             && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
+        )
      {
          AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
          return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
      }

-     /*
-      * Call the reset function to make sure all the memory is cleared.
-      */
-     PGMR3Reset(pVM);
-
-     /*
-      * Do the loading while owning the lock because a bunch of the functions
-      * we're using requires this.
-      */
-     pgmLock(pVM);
-     rc = pgmR3LoadLocked(pVM, pSSM, uVersion);
-     pgmUnlock(pVM);
-     if (RT_SUCCESS(rc))
+     if (uPass != SSM_PASS_FINAL)
      {
          /*
-          * We require a full resync now.
+          * The non-final passes contains only memory.
           */
-         for (VMCPUID i = 0; i < pVM->cCpus; i++)
-         {
-             PVMCPU pVCpu = &pVM->aCpus[i];
-             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
-             VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
-
-             pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
-         }
-
-         pgmR3HandlerPhysicalUpdateAll(pVM);
-
-         for (VMCPUID i = 0; i < pVM->cCpus; i++)
-         {
-             PVMCPU pVCpu = &pVM->aCpus[i];
-
+         pgmLock(pVM);
+         rc = pgmR3LoadLockedMemory(pVM, pSSM, uPass);
+         pgmUnlock(pVM);
+     }
+     else
+     {
+         /*
+          * Do the loading while owning the lock because a bunch of the functions
+          * we're using requires this.
+          */
+         pgmLock(pVM);
+         rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
+         pgmUnlock(pVM);
+         if (RT_SUCCESS(rc))
+         {
              /*
-              * Change the paging mode.
+              * We require a full resync now.
               */
-             rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
-
-             /* Restore pVM->pgm.s.GCPhysCR3. */
-             Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
-             RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
-             if (   pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
-                 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
-                 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
-                 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
-                 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
-             else
-                 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
-             pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
+             for (VMCPUID i = 0; i < pVM->cCpus; i++)
+             {
+                 PVMCPU pVCpu = &pVM->aCpus[i];
+                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
+                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
+
+                 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
+             }
+
+             pgmR3HandlerPhysicalUpdateAll(pVM);
+
+             for (VMCPUID i = 0; i < pVM->cCpus; i++)
+             {
+                 PVMCPU pVCpu = &pVM->aCpus[i];
+
+                 /*
+                  * Change the paging mode.
+                  */
+                 rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
+
+                 /* Restore pVM->pgm.s.GCPhysCR3. */
+                 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
+                 RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
+                 if (   pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
+                     || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
+                     || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
+                     || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
+                     GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
+                 else
+                     GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
+                 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
+             }
          }
      }
…
                              pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
                              NULL, pgmR3SaveExec, pgmR3SaveDone,
-                             NULL, pgmR3Load, NULL);
+                             pgmR3LoadPrep, pgmR3Load, NULL);
  }
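Taken together, the unit now participates in the full live-save protocol. A rough sketch of how SSM presumably drives the callbacks registered above; the driver loop itself is SSM-internal and not part of this changeset, and the convention that a VINF_SUCCESS vote means "ready" is an assumption:

    /* Rough driver sketch (SSM internals simplified): */
    pgmR3LivePrep(pVM, pSSM);               /* allocate paLSPages, count pages  */
    for (uint32_t uPass = 0; ; uPass++)
    {
        pgmR3LiveExec(pVM, pSSM, uPass);    /* scan, re-monitor, save quiescent pages */
        if (pgmR3LiveVote(pVM, pSSM) == VINF_SUCCESS)
            break;                          /* little enough dirty data left    */
    }
    /* The VM is suspended for the final pass: */
    pgmR3SaveExec(pVM, pSSM);               /* remaining state and dirty pages  */
    pgmR3SaveDone(pVM, pSSM);               /* cleanup                          */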
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r22753 → r23393:

              PGM_PAGE_SET_WRITTEN_TO(pPage);
              PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
+             Assert(pVM->pgm.s.cMonitoredPages > 0);
+             pVM->pgm.s.cMonitoredPages--;
+             pVM->pgm.s.cWrittenToPages++;
              /* fall thru */
          default: /* to shut up GCC */
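This keeps the global counters in lockstep with the page-state transition: a write-monitored page that gets written moves from the monitored pool to the written-to pool, and pgmR3LiveExecPart1 reverses the move when it re-arms monitoring. The invariant, written out as a hypothetical helper (the name is illustrative; the body mirrors the hunk above):

    static void pgmPhysWriteMonitorFired(PVM pVM, PPGMPAGE pPage)
    {
        PGM_PAGE_SET_WRITTEN_TO(pPage);
        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
        Assert(pVM->pgm.s.cMonitoredPages > 0);
        pVM->pgm.s.cMonitoredPages--;   /* no longer write monitored ...    */
        pVM->pgm.s.cWrittenToPages++;   /* ... but remembered as written to */
    }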