VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMSavedState.cpp@ 23536

Last change on this file since 23536 was 23536, checked in by vboxsync, 16 years ago

oops.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 102.2 KB
Line 
1/* $Id: PGMSavedState.cpp 23536 2009-10-04 13:33:52Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/stam.h>
29#include <VBox/ssm.h>
30#include <VBox/pdm.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33
34#include <VBox/param.h>
35#include <VBox/err.h>
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/crc32.h>
40#include <iprt/mem.h>
41#include <iprt/sha.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44
45
46/*******************************************************************************
47* Defined Constants And Macros *
48*******************************************************************************/
/** Saved state data unit version. */
#ifdef VBOX_WITH_LIVE_MIGRATION
# define PGM_SAVED_STATE_VERSION                10
#else
# define PGM_SAVED_STATE_VERSION                9
#endif
/** Saved state data unit version for 3.0. (pre live migration) */
#define PGM_SAVED_STATE_VERSION_3_0_0           9
/** Saved state data unit version for 2.2.2 and later. */
#define PGM_SAVED_STATE_VERSION_2_2_2           8
/** Saved state data unit version for 2.2.0. */
#define PGM_SAVED_STATE_VERSION_RR_DESC         7
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6


/** @name Sparse state record types
 * @{ */
/** Zero page. No data. */
#define PGM_STATE_REC_RAM_ZERO      UINT8_C(0x00)
/** Raw page. */
#define PGM_STATE_REC_RAM_RAW       UINT8_C(0x01)
/** Raw MMIO2 page. */
#define PGM_STATE_REC_MMIO2_RAW     UINT8_C(0x02)
/** Zero MMIO2 page. */
#define PGM_STATE_REC_MMIO2_ZERO    UINT8_C(0x03)
/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
#define PGM_STATE_REC_ROM_VIRGIN    UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
#define PGM_STATE_REC_ROM_SHW_RAW   UINT8_C(0x05)
/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
#define PGM_STATE_REC_ROM_SHW_ZERO  UINT8_C(0x06)
/** ROM protection (8-bit). */
#define PGM_STATE_REC_ROM_PROT      UINT8_C(0x07)
/** The last record type. */
#define PGM_STATE_REC_LAST          PGM_STATE_REC_ROM_PROT
/** End marker. */
#define PGM_STATE_REC_END           UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is a 8-bit
 * range ID and a 32-bit page index.
 */
#define PGM_STATE_REC_FLAG_ADDR     UINT8_C(0x80)
/** @} */

/** The CRC-32 for a zero half page. */
#define PGM_STATE_CRC32_ZERO_HALF_PAGE  UINT32_C(0xf1e8ba9e)
96
97
98/*******************************************************************************
99* Structures and Typedefs *
100*******************************************************************************/
/** For loading old saved states. (pre-smp)
 * Mirrors the layout of the pre-SMP PGM state so s_aPGMFields_Old can
 * describe it to the SSM field loader. */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool                            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t                        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR                         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting Guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS                        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool                            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE                         enmGuestMode;
} PGMOLD;
120
121
122/*******************************************************************************
123* Global Variables *
124*******************************************************************************/
/** PGM fields to save/load. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};

/** PGMCPU fields to save/load. */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

/** Field descriptors for loading the old pre-SMP state (see PGMOLD). */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
152
153
154/**
155 * Find the ROM tracking structure for the given page.
156 *
157 * @returns Pointer to the ROM page structure. NULL if the caller didn't check
158 * that it's a ROM page.
159 * @param pVM The VM handle.
160 * @param GCPhys The address of the ROM page.
161 */
162static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
163{
164 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
165 pRomRange;
166 pRomRange = pRomRange->CTX_SUFF(pNext))
167 {
168 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
169 if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
170 return &pRomRange->aPages[off >> PAGE_SHIFT];
171 }
172 return NULL;
173}
174
175
176/**
177 * Prepares the ROM pages for a live save.
178 *
179 * @returns VBox status code.
180 * @param pVM The VM handle.
181 */
182static int pgmR3PrepRomPages(PVM pVM)
183{
184 /*
185 * Initialize the live save tracking in the ROM page descriptors.
186 */
187 pgmLock(pVM);
188 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
189 {
190 PPGMRAMRANGE pRamHint = NULL;;
191 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
192
193 for (uint32_t iPage = 0; iPage < cPages; iPage++)
194 {
195 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
196 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
197 pRom->aPages[iPage].LiveSave.fDirty = true;
198 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
199 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
200 {
201 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
202 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow);
203 else
204 {
205 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
206 PPGMPAGE pPage;
207 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
208 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
209 if (RT_SUCCESS(rc))
210 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage);
211 else
212 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow);
213 }
214 }
215 }
216
217 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
218 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
219 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
220 }
221 pgmUnlock(pVM);
222
223 return VINF_SUCCESS;
224}
225
226
227/**
228 * Assigns IDs to the ROM ranges and saves them.
229 *
230 * @returns VBox status code.
231 * @param pVM The VM handle.
232 * @param pSSM Saved state handle.
233 */
234static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
235{
236 pgmLock(pVM);
237 uint8_t id = 1;
238 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
239 {
240 pRom->idSavedState = id;
241 SSMR3PutU8(pSSM, id);
242 SSMR3PutStrZ(pSSM, ""); /* device name */
243 SSMR3PutU32(pSSM, 0); /* device instance */
244 SSMR3PutU8(pSSM, 0); /* region */
245 SSMR3PutStrZ(pSSM, pRom->pszDesc);
246 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
247 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
248 if (RT_FAILURE(rc))
249 break;
250 }
251 pgmUnlock(pVM);
252 return SSMR3PutU8(pSSM, UINT8_MAX);
253}
254
255
/**
 * Loads the ROM range ID assignments.
 *
 * Reads the records written by pgmR3SaveRomRanges (id, device name, instance,
 * region, description, address, size) until the UINT8_MAX end marker, and
 * matches each one to a configured ROM range by description (the device
 * name/instance/region fields are written as blanks by the saver).
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    Assert(PGMIsLockOwner(pVM));

    /* Invalidate all current assignments first so unmatched ranges can be detected. */
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        pRom->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /* End marker - every configured range should have been assigned an ID. */
            for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                AssertLogRelMsg(pRom->idSavedState != UINT8_MAX, ("%s\n", pRom->pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szDeviceName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);  /* rc covers the whole batch of gets above */
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp %s\n", GCPhys, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        PPGMROMRANGE pRom;
        for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        {
            /* Match by description; each range may only be claimed once. */
            if (    pRom->idSavedState == UINT8_MAX
                &&  !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        AssertLogRelMsgReturn(pRom, ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_LOAD_CONFIG_MISMATCH);
    } /* forever */
}
331
332
333/**
334 * Scan ROM pages.
335 *
336 * @param pVM The VM handle.
337 */
338static void pgmR3ScanRomPages(PVM pVM)
339{
340 /*
341 * The shadow ROMs.
342 */
343 pgmLock(pVM);
344 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
345 {
346 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
347 {
348 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
349 for (uint32_t iPage = 0; iPage < cPages; iPage++)
350 {
351 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
352 if (pRomPage->LiveSave.fWrittenTo)
353 {
354 pRomPage->LiveSave.fWrittenTo = false;
355 if (!pRomPage->LiveSave.fDirty)
356 {
357 pRomPage->LiveSave.fDirty = true;
358 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
359 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
360 }
361 pRomPage->LiveSave.fDirtiedRecently = true;
362 }
363 else
364 pRomPage->LiveSave.fDirtiedRecently = false;
365 }
366 }
367 }
368 pgmUnlock(pVM);
369}
370
371
/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int  rc = VINF_SUCCESS;
            char abPage[PAGE_SIZE];
            if (!PGM_PAGE_IS_ZERO(pPage))
            {
                void const *pvPage;
                rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, PAGE_SIZE);
            }
            else
                ASMMemZeroPage(abPage);
            /* The PGM lock is dropped while writing to the saved state stream. */
            pgmUnlock(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it.  Only the first page of a range carries the address record;
               subsequent pages are implicitly sequential. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;              /* (lock is not held here) */

            /* Update state. */
            pgmLock(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
443
444
/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> PAGE_SHIFT;
            uint32_t       iPrevPage = cPages;  /* invalid, forces an address record first */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                /* Save now when not doing a live save, or when the page is dirty
                   and has quiesced (not recently dirtied nor written to), or in
                   the final pass regardless of quiescence. */
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                             )
                         )
                    )
                {
                    uint8_t     abPage[PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    /* When the ROM is mapped, the shadow copy lives in the descriptor;
                       otherwise the shadow is the page at GCPhys. */
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(&pVM->pgm.s, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage);
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
                        rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, PAGE_SIZE);
                    }
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        /* Bookkeeping while the lock is still held. */
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                    }
                    /* The lock is dropped while writing to the stream. */
                    pgmUnlock(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    /* Sequential pages get the compact record without the address. */
                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;      /* (lock is not held here) */

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    pgmUnlock(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;      /* (lock is not held here) */

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
554
555
/**
 * Cleans up ROM pages after a live save.
 *
 * Currently a no-op: the ROM live save tracking lives inside the ROM page
 * descriptors (see pgmR3PrepRomPages) and needs no separate freeing.
 *
 * @param   pVM     The VM handle.
 */
static void pgmR3DoneRomPages(PVM pVM)
{
    NOREF(pVM);
}
565
566
/**
 * Prepares the MMIO2 pages for a live save.
 *
 * Allocates a PGMLIVESAVEMMIO2PAGE tracking array per MMIO2 range and marks
 * every page as a dirty zero page so the first pass considers everything.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    pgmLock(pVM);
    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    {
        uint32_t const cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
        /* Drop the lock while allocating. */
        pgmUnlock(pVM);

        PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
        if (!paLSPages)
            return VERR_NO_MEMORY;      /* (lock is not held here) */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            /* Initialize it as a dirty zero page. */
            paLSPages[iPage].fDirty          = true;
            paLSPages[iPage].cUnchangedScans = 0;
            paLSPages[iPage].fZero           = true;
            paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
        }

        pgmLock(pVM);
        pMmio2->paLSPages = paLSPages;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
605
606
607/**
608 * Assigns IDs to the MMIO2 ranges and saves them.
609 *
610 * @returns VBox status code.
611 * @param pVM The VM handle.
612 * @param pSSM Saved state handle.
613 */
614static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
615{
616 pgmLock(pVM);
617 uint8_t id = 1;
618 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++)
619 {
620 pMmio2->idSavedState = id;
621 SSMR3PutU8(pSSM, id);
622 SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pDevReg->szDeviceName);
623 SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance);
624 SSMR3PutU8(pSSM, pMmio2->iRegion);
625 SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc);
626 int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb);
627 if (RT_FAILURE(rc))
628 break;
629 }
630 pgmUnlock(pVM);
631 return SSMR3PutU8(pSSM, UINT8_MAX);
632}
633
634
635/**
636 * Loads the MMIO2 range ID assignments.
637 *
638 * @returns VBox status code.
639 *
640 * @param pVM The VM handle.
641 * @param pSSM The saved state handle.
642 */
643static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
644{
645 Assert(PGMIsLockOwner(pVM));
646
647 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
648 pMmio2->idSavedState = UINT8_MAX;
649
650 for (;;)
651 {
652 /*
653 * Read the data.
654 */
655 uint8_t id;
656 int rc = SSMR3GetU8(pSSM, &id);
657 if (RT_FAILURE(rc))
658 return rc;
659 if (id == UINT8_MAX)
660 {
661 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
662 AssertLogRelMsg(pMmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));
663 return VINF_SUCCESS; /* the end */
664 }
665 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
666
667 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szDeviceName)];
668 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
669 AssertLogRelRCReturn(rc, rc);
670
671 uint32_t uInstance;
672 SSMR3GetU32(pSSM, &uInstance);
673 uint8_t iRegion;
674 SSMR3GetU8(pSSM, &iRegion);
675
676 char szDesc[64];
677 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
678 AssertLogRelRCReturn(rc, rc);
679
680 RTGCPHYS cb;
681 rc = SSMR3GetGCPhys(pSSM, &cb);
682 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
683
684 /*
685 * Locate a matching MMIO2 range.
686 */
687 PPGMMMIO2RANGE pMmio2;
688 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
689 {
690 if ( pMmio2->idSavedState == UINT8_MAX
691 && pMmio2->iRegion == iRegion
692 && pMmio2->pDevInsR3->iInstance == uInstance
693 && !strcmp(pMmio2->pDevInsR3->pDevReg->szDeviceName, szDevName))
694 {
695 pMmio2->idSavedState = id;
696 break;
697 }
698 }
699 AssertLogRelMsgReturn(pMmio2, ("%s/%u/%u: %s\n", szDevName, uInstance, iRegion, szDesc), VERR_SSM_LOAD_CONFIG_MISMATCH);
700 } /* forever */
701}
702
703
/**
 * Scans one MMIO2 page.
 *
 * Dirty detection is heuristic: the page is CRC-32'ed in two halves and the
 * values are compared with those from the previous scan.  Zero pages are
 * special cased via ASMMemIsZeroPage.  When the first half CRC mismatches,
 * the second half is not checked this time (it will be on the next scan).
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM         The VM handle
 * @param   pbPage      The page bits.
 * @param   pLSPage     The live save tracking structure for the page.
 *
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    if (pLSPage->fZero)
    {
        if (ASMMemIsZeroPage(pbPage))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        /* No longer zero; seed the first-half CRC for subsequent scans. */
        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            /* If the first-half CRC equals the zero-half constant, recheck
               whether the whole page has gone back to zero. */
            if (    u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                &&  ASMMemIsZeroPage(pbPage))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
    }
    return true;
}
774
775
776/**
777 * Scan for MMIO2 page modifications.
778 *
779 * @param pVM The VM handle.
780 * @param uPass The pass number.
781 */
782static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
783{
784 /*
785 * Since this is a bit expensive we lower the scan rate after a little while.
786 */
787 if ( ( (uPass & 3) != 0
788 && uPass > 10)
789 || uPass == SSM_PASS_FINAL)
790 return;
791
792 pgmLock(pVM); /* paranoia */
793 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
794 {
795 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
796 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3;
797 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
798 pgmUnlock(pVM);
799
800 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
801 {
802 uint8_t const *pbPage = (uint8_t const *)pMmio2->pvR3 + iPage * PAGE_SIZE;
803 pgmR3ScanMmio2Page(pVM,pbPage, &paLSPages[iPage]);
804 }
805
806 pgmLock(pVM);
807 }
808 pgmUnlock(pVM);
809
810}
811
812
/**
 * Save quiescent MMIO2 pages.
 *
 * In intermediate passes only dirty pages that have been unchanged for a few
 * scans are written; in the final pass everything not provably identical to
 * what was already saved (checked via a stored SHA-1) is written.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        pgmLock(pVM);
        for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
             pMmio2 && RT_SUCCESS(rc);
             pMmio2 = pMmio2->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
            uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
            uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
            uint32_t              iPageLast = cPages;  /* invalid, forces an address record first */
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */
                    if (    !paLSPages[iPage].fDirty
                        &&  !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;   /* identical to what was saved earlier */
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                }

                /* Sequential pages get the compact record without the address. */
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        pgmUnlock(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        pgmLock(pVM);
        for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
             pMmio2 && RT_SUCCESS(rc);
             pMmio2 = pMmio2->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
            uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
            uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
            uint32_t              iPageLast = cPages;  /* invalid, forces an address record first */
            /* The lock is dropped while scanning and writing to the stream. */
            pgmUnlock(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
            {
                /* Skip clean pages and pages which hasn't quiesced. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t    abPage[PAGE_SIZE];
                if (!fZero)
                {
                    /* Snapshot the page and remember its SHA-1 so the final
                       pass can skip it if it stays unchanged. */
                    memcpy(abPage, pbPage, PAGE_SIZE);
                    RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                iPageLast = iPage;
            }

            pgmLock(pVM);
        }
        pgmUnlock(pVM);
    }

    return rc;
}
947
948
949/**
950 * Cleans up MMIO2 pages after a live save.
951 *
952 * @param pVM The VM handle.
953 */
954static void pgmR3DoneMmio2Pages(PVM pVM)
955{
956 /*
957 * Free the tracking structures for the MMIO2 pages.
958 * We do the freeing outside the lock in case the VM is running.
959 */
960 pgmLock(pVM);
961 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
962 {
963 void *pvMmio2ToFree = pMmio2->paLSPages;
964 if (pvMmio2ToFree)
965 {
966 pMmio2->paLSPages = NULL;
967 pgmUnlock(pVM);
968 MMR3HeapFree(pvMmio2ToFree);
969 pgmLock(pVM);
970 }
971 }
972 pgmUnlock(pVM);
973}
974
975
/**
 * Prepares the RAM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int pgmR3PrepRamPages(PVM pVM)
{

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (    !pCur->paLSPages
                &&  !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                /* Snapshot the generation counter so list changes during the
                   unlocked allocation can be detected. */
                uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
                pgmUnlock(pVM);
                PPGMLIVESAVEPAGE paLSPages = (PPGMLIVESAVEPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVEPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                pgmLock(pVM);
                if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                {
                    /* The list changed while we were allocating; ditch the
                       array and restart the outer do-loop (pCur != NULL). */
                    pgmUnlock(pVM);
                    MMR3HeapFree(paLSPages);
                    pgmLock(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].uPassSaved             = UINT32_MAX;
                    paLSPages[iPage].cDirtied               = 0;
                    paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored        = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved             = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (PGM_PAGE_IS_ZERO(pPage))
                            {
                                paLSPages[iPage].fZero   = 1;
                                paLSPages[iPage].fShared = 0;
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 1;
                            }
                            else
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 0;
                            }
                            paLSPages[iPage].fIgnore     = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            /* ROM pages are handled by the ROM save code
                               (see pgmR3SaveRomVirginPages & friends). */
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            /* fall thru - treat unknown types like MMIO2. */
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
    } while (pCur);
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
1094
1095
/**
 * Scan for RAM page modifications and reprotect them.
 *
 * Walks all non-ad-hoc RAM ranges inspecting each page's state: pages that
 * have been written to since the last scan are flagged dirty in the live save
 * tracking array (PGMRAMRANGE::paLSPages) and put back under write
 * monitoring.  Transitions to/from the zero and shared states are tracked as
 * well so the save pass knows which kind of record to emit.
 *
 * @param   pVM         The VM handle.
 * @param   fFinalPass  Whether this is the final pass or not.  No critsect
 *                      yielding is done in the final pass.
 */
static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysCur = 0;
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        /* idRamRangesGen changes whenever the range list is modified; it is used
           below to detect that a critsect yield invalidated the list iteration. */
        uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (    pCur->GCPhysLast > GCPhysCur
                &&  !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
                uint32_t         cPages    = pCur->cb >> PAGE_SHIFT;
                /* Resume inside this range if the previous restart stopped in it. */
                uint32_t         iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (    !fFinalPass
                        &&  (iPage & 0x7ff) == 0x100
                        &&  PDMR3CritSectYield(&pVM->pgm.s.CritSect)
                        &&  pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /* Skip already ignored pages. */
                    if (paLSPages[iPage].fIgnore)
                        continue;

                    if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
                    {
                        /*
                         * A RAM page.
                         */
                        switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                /** @todo Optimize this: Don't always re-enable write
                                 * monitoring if the page is known to be very busy. */
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    /* Written to while monitored: clear the flag and
                                       adjust the global written-to counter. */
                                    Assert(paLSPages[iPage].fWriteMonitored);
                                    PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                else
                                {
                                    /* Allocated but not previously monitored (e.g. a
                                       zero page that got a real allocation). */
                                    Assert(!paLSPages[iPage].fWriteMonitored);
                                    pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
                                    if (paLSPages[iPage].fZero)
                                        pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                }

                                if (!paLSPages[iPage].fDirty)
                                {
                                    pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                    pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    /* cDirtied saturates at PGMLIVSAVEPAGE_MAX_DIRTIED. */
                                    if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                        paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                }

                                /* Re-arm write monitoring and mark the page dirty. */
                                PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
                                pVM->pgm.s.cMonitoredPages++;
                                paLSPages[iPage].fWriteMonitored = 1;
                                paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                paLSPages[iPage].fDirty = 1;
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fShared = 0;
                                break;

                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                Assert(paLSPages[iPage].fWriteMonitored);
                                /* An outstanding write lock means someone may be writing
                                   the page right now; keep treating it as just-monitored
                                   so the save pass skips it. */
                                if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                else
                                {
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }
                                }
                                break;

                            case PGM_PAGE_STATE_ZERO:
                                /* Transition into the zero state. */
                                if (!paLSPages[iPage].fZero)
                                {
                                    paLSPages[iPage].fZero = 1;
                                    paLSPages[iPage].fShared = 0;
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    pVM->pgm.s.LiveSave.Ram.cZeroPages++;
                                }
                                break;

                            case PGM_PAGE_STATE_SHARED:
                                /* Transition into the shared state. */
                                if (!paLSPages[iPage].fShared)
                                {
                                    paLSPages[iPage].fZero = 0;
                                    paLSPages[iPage].fShared = 1;
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                }
                                break;
                        }
                    }
                    else
                    {
                        /*
                         * All other types => Ignore the page.
                         */
                        Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
                        paLSPages[iPage].fIgnore = 1;
                        if (paLSPages[iPage].fWriteMonitored)
                        {
                            /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
                             * pages! */
                            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                            {
                                AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                Assert(pVM->pgm.s.cMonitoredPages > 0);
                                pVM->pgm.s.cMonitoredPages--;
                            }
                            if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                            {
                                PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
                                Assert(pVM->pgm.s.cWrittenToPages > 0);
                                pVM->pgm.s.cWrittenToPages--;
                            }
                            pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
                        }

                        /** @todo the counting doesn't quite work out here. fix later? */
                        if (paLSPages[iPage].fDirty)
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        else
                            pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                        pVM->pgm.s.LiveSave.cIgnoredPages++;
                    }
                } /* for each page in range */

                /* A non-zero GCPhysCur here means the inner loop broke out for a restart. */
                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur);
    pgmUnlock(pVM);
}
1271
1272
/**
 * Save quiescent RAM pages.
 *
 * Writes dirty RAM pages that look stable (not written to since the last
 * scan) to the saved state.  The PGM lock is dropped around the actual SSM
 * writes since those may block on I/O, and the range generation counter is
 * re-checked afterwards to detect concurrent RAM range changes.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.  (Not referenced in
 *                      this function's body; kept for interface symmetry with
 *                      the other save workers.)
 * @param   uPass       The pass number; SSM_PASS_FINAL for the final pass.
 */
static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
    RTGCPHYS GCPhysCur = 0;
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        /* Used to detect RAM range list changes across yields/unlocks. */
        uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (    pCur->GCPhysLast > GCPhysCur
                &&  !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
                uint32_t         cPages    = pCur->cb >> PAGE_SHIFT;
                /* Resume inside this range if a restart stopped in it. */
                uint32_t         iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (    uPass != SSM_PASS_FINAL
                        &&  (iPage & 0x7ff) == 0x100
                        &&  PDMR3CritSectYield(&pVM->pgm.s.CritSect)
                        &&  pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /*
                     * Only save pages that haven't changed since the last scan and are dirty.
                     */
                    if (    uPass != SSM_PASS_FINAL
                        &&  paLSPages)
                    {
                        if (!paLSPages[iPage].fDirty)
                            continue;
                        if (paLSPages[iPage].fWriteMonitoredJustNow)
                            continue;
                        if (paLSPages[iPage].fIgnore)
                            continue;
                        if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM) /* in case of recent remappings */
                            continue;
                        /* The page state must still match what the scan recorded,
                           otherwise it changed under us and will be picked up later. */
                        if (    PGM_PAGE_GET_STATE(&pCur->aPages[iPage])
                            !=  (  paLSPages[iPage].fZero
                                 ? PGM_PAGE_STATE_ZERO
                                 : paLSPages[iPage].fShared
                                 ? PGM_PAGE_STATE_SHARED
                                 : PGM_PAGE_STATE_WRITE_MONITORED))
                            continue;
                        if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
                            continue;
                    }
                    else
                    {
                        /* Final pass (or no tracking array): save everything that is
                           not clean-and-tracked and is actual RAM. */
                        if (    paLSPages
                            &&  !paLSPages[iPage].fDirty
                            &&  !paLSPages[iPage].fIgnore)
                            continue;
                        if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
                            continue;
                    }

                    /*
                     * Do the saving outside the PGM critsect since SSM may block on I/O.
                     */
                    int         rc;
                    RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    bool        fZero  = PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]);

                    if (!fZero)
                    {
                        /*
                         * Copy the page and then save it outside the lock (since any
                         * SSM call may block).
                         */
                        char        abPage[PAGE_SIZE];
                        void const *pvPage;
                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, PAGE_SIZE);
                        pgmUnlock(pVM);
                        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                        /* Sequential pages get a compact record without the address. */
                        if (GCPhys == GCPhysLast + PAGE_SIZE)
                            SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
                        else
                        {
                            SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
                            SSMR3PutGCPhys(pSSM, GCPhys);
                        }
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    }
                    else
                    {
                        /*
                         * Dirty zero page.
                         */
                        pgmUnlock(pVM);

                        if (GCPhys == GCPhysLast + PAGE_SIZE)
                            rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
                        else
                        {
                            SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
                            rc = SSMR3PutGCPhys(pSSM, GCPhys);
                        }
                    }
                    if (RT_FAILURE(rc))
                        return rc;

                    /* Retake the lock and update the tracking state. */
                    pgmLock(pVM);
                    GCPhysLast = GCPhys;
                    if (paLSPages)
                    {
                        paLSPages[iPage].fDirty = 0;
                        paLSPages[iPage].uPassSaved = uPass;
                        pVM->pgm.s.LiveSave.Ram.cReadyPages++;
                        pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        if (fZero)
                            pVM->pgm.s.LiveSave.Ram.cZeroPages++;
                    }
                    /* The range list may have changed while we were unlocked. */
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                    {
                        GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
                        break; /* restart */
                    }

                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur);
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
1426
1427
/**
 * Cleans up RAM pages after a live save.
 *
 * Frees the per-range PGMLIVESAVEPAGE tracking arrays and removes write
 * monitoring from all RAM pages again.
 *
 * @param   pVM     The VM handle.
 */
static void pgmR3DoneRamPages(PVM pVM)
{
    /*
     * Free the tracking arrays and disable write monitoring.
     *
     * Play nice with the PGM lock in case we're called while the VM is still
     * running.  This means we have to delay the freeing since we wish to use
     * paLSPages as an indicator of which RAM ranges we still need to scan for
     * write monitored pages.
     */
    void        *pvToFree = NULL;
    PPGMRAMRANGE pCur;
    uint32_t     cMonitoredPages = 0;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->paLSPages)
            {
                /* Free the previous range's array outside the lock; if the range
                   list changed meanwhile, restart the walk from the beginning. */
                if (pvToFree)
                {
                    uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                    pgmUnlock(pVM);
                    MMR3HeapFree(pvToFree);
                    pvToFree = NULL;
                    pgmLock(pVM);
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                        break;              /* start over again. */
                }

                /* Detach the array now; it is freed on the next iteration or at the end. */
                pvToFree = pCur->paLSPages;
                pCur->paLSPages = NULL;

                /* Remove write monitoring from every page in the range. */
                uint32_t iPage = pCur->cb >> PAGE_SHIFT;
                while (iPage--)
                {
                    PPGMPAGE pPage = &pCur->aPages[iPage];
                    PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
                    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
                    {
                        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
                        cMonitoredPages++;
                    }
                }
            }
        }
    } while (pCur);

    /* Adjust the global monitored page count, clamping at zero just in case. */
    Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
    if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
        pVM->pgm.s.cMonitoredPages = 0;
    else
        pVM->pgm.s.cMonitoredPages -= cMonitoredPages;

    pgmUnlock(pVM);

    /* Free the last array outside the lock (MMR3HeapFree(NULL) is harmless). */
    MMR3HeapFree(pvToFree);
    pvToFree = NULL;
}
1493
1494
/**
 * Execute a live save pass.
 *
 * Scans all page types for modifications and then saves the stable dirty
 * pages (ROM virgin pages in pass 0 only, then shadowed ROM, MMIO2 and RAM).
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pSSM    The SSM handle.
 * @param   uPass   The current pass number (0 for the first pass).
 */
static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    int rc;

    /*
     * Save the MMIO2 and ROM range IDs in pass 0.
     */
    if (uPass == 0)
    {
        rc = pgmR3SaveRomRanges(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
        rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Do the scanning.
     */
    pgmR3ScanRomPages(pVM);
    pgmR3ScanMmio2Pages(pVM, uPass);
    pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
    pgmR3PoolClearAll(pVM); /** @todo this could perhaps be optimized a bit. */

    /*
     * Save the pages.
     */
    if (uPass == 0)
        rc = pgmR3SaveRomVirginPages(  pVM, pSSM, true /*fLiveSave*/);
    else
        rc = VINF_SUCCESS;
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveMmio2Pages(      pVM, pSSM, true /*fLiveSave*/, uPass);
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveRamPages(        pVM, pSSM, true /*fLiveSave*/, uPass);
    SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */

    return rc;
}
1545
1546//#include <iprt/stream.h>
1547
1548/**
1549 * Votes on whether the live save phase is done or not.
1550 *
1551 * @returns VBox status code.
1552 *
1553 * @param pVM The VM handle.
1554 * @param pSSM The SSM handle.
1555 */
1556static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM)
1557{
1558#if 0
1559 RTPrintf("# Rom[R/D/Z/M]=%03x/%03x/%03x/%03x Mmio2=%04x/%04x/%04x/%04x Ram=%06x/%06x/%06x/%06x Ignored=%03x\n",
1560 pVM->pgm.s.LiveSave.Rom.cReadyPages,
1561 pVM->pgm.s.LiveSave.Rom.cDirtyPages,
1562 pVM->pgm.s.LiveSave.Rom.cZeroPages,
1563 pVM->pgm.s.LiveSave.Rom.cMonitoredPages,
1564 pVM->pgm.s.LiveSave.Mmio2.cReadyPages,
1565 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages,
1566 pVM->pgm.s.LiveSave.Mmio2.cZeroPages,
1567 pVM->pgm.s.LiveSave.Mmio2.cMonitoredPages,
1568 pVM->pgm.s.LiveSave.Ram.cReadyPages,
1569 pVM->pgm.s.LiveSave.Ram.cDirtyPages,
1570 pVM->pgm.s.LiveSave.Ram.cZeroPages,
1571 pVM->pgm.s.LiveSave.Ram.cMonitoredPages,
1572 pVM->pgm.s.LiveSave.cIgnoredPages
1573 );
1574 static int s_iHack = 0;
1575 if ((++s_iHack % 42) == 0)
1576 return VINF_SUCCESS;
1577#else
1578 if ( pVM->pgm.s.LiveSave.Rom.cDirtyPages
1579 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1580 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1581 < 256) /* semi random numbers. */
1582 return VINF_SUCCESS;
1583#endif
1584 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1585}
1586
1587#ifndef VBOX_WITH_LIVE_MIGRATION
1588
1589/**
1590 * Save zero indicator + bits for the specified page.
1591 *
1592 * @returns VBox status code, errors are logged/asserted before returning.
1593 * @param pVM The VM handle.
1594 * @param pSSH The saved state handle.
1595 * @param pPage The page to save.
1596 * @param GCPhys The address of the page.
1597 * @param pRam The ram range (for error logging).
1598 */
1599static int pgmR3SavePage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1600{
1601 int rc;
1602 if (PGM_PAGE_IS_ZERO(pPage))
1603 rc = SSMR3PutU8(pSSM, 0);
1604 else
1605 {
1606 void const *pvPage;
1607 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
1608 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
1609
1610 SSMR3PutU8(pSSM, 1);
1611 rc = SSMR3PutMem(pSSM, pvPage, PAGE_SIZE);
1612 }
1613 return rc;
1614}
1615
1616
1617/**
1618 * Save a shadowed ROM page.
1619 *
1620 * Format: Type, protection, and two pages with zero indicators.
1621 *
1622 * @returns VBox status code, errors are logged/asserted before returning.
1623 * @param pVM The VM handle.
1624 * @param pSSH The saved state handle.
1625 * @param pPage The page to save.
1626 * @param GCPhys The address of the page.
1627 * @param pRam The ram range (for error logging).
1628 */
1629static int pgmR3SaveShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1630{
1631 /* Need to save both pages and the current state. */
1632 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
1633 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);
1634
1635 SSMR3PutU8(pSSM, PGMPAGETYPE_ROM_SHADOW);
1636 SSMR3PutU8(pSSM, pRomPage->enmProt);
1637
1638 int rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhys, pRam);
1639 if (RT_SUCCESS(rc))
1640 {
1641 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1642 rc = pgmR3SavePage(pVM, pSSM, pPagePassive, GCPhys, pRam);
1643 }
1644 return rc;
1645}
1646
1647#endif /* !VBOX_WITH_LIVE_MIGRATION */
1648
1649
1650/**
1651 * Prepare for a live save operation.
1652 *
1653 * This will attempt to allocate and initialize the tracking structures. It
1654 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1655 * pgmR3SaveDone will do the cleanups.
1656 *
1657 * @returns VBox status code.
1658 *
1659 * @param pVM The VM handle.
1660 * @param pSSM The SSM handle.
1661 */
1662static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
1663{
1664 /*
1665 * Indicate that we will be using the write monitoring.
1666 */
1667 pgmLock(pVM);
1668 /** @todo find a way of mediating this when more users are added. */
1669 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
1670 {
1671 pgmUnlock(pVM);
1672 AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
1673 }
1674 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
1675 pgmUnlock(pVM);
1676
1677 /*
1678 * Initialize the statistics.
1679 */
1680 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
1681 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
1682 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
1683 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
1684 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
1685 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
1686 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
1687 pVM->pgm.s.LiveSave.fActive = true;
1688
1689 /*
1690 * Per page type.
1691 */
1692 int rc = pgmR3PrepRomPages(pVM);
1693 if (RT_SUCCESS(rc))
1694 rc = pgmR3PrepMmio2Pages(pVM);
1695 if (RT_SUCCESS(rc))
1696 rc = pgmR3PrepRamPages(pVM);
1697 return rc;
1698}
1699
1700
/**
 * Execute state save operation.
 *
 * Saves the PGM core state (fields, per-CPU state, guest mappings) and then
 * the guest memory: either the live save remainder (when a live save is
 * active) or everything in one go (old non-live path).
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
{
    int         rc;
    unsigned    i;
    PPGM        pPGM = &pVM->pgm.s;

    /*
     * Lock PGM and set the no-more-writes indicator.
     */
    pgmLock(pVM);
    pVM->pgm.s.fNoMorePhysWrites = true;

    /*
     * Save basic data (required / unaffected by relocation).
     */
    SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        SSMR3PutStruct(pSSM, &pVCpu->pgm.s, &s_aPGMCpuFields[0]);
    }

    /*
     * The guest mappings.
     */
    i = 0;
    for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
    {
        SSMR3PutU32(      pSSM, i);
        SSMR3PutStrZ(     pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
        SSMR3PutGCPtr(    pSSM, pMapping->GCPtr);
        SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
    }
    rc = SSMR3PutU32(pSSM, ~0); /* terminator. */

#ifdef VBOX_WITH_LIVE_MIGRATION
    /*
     * Save the (remainder of the) memory.
     */
    if (RT_SUCCESS(rc))
    {
        if (pVM->pgm.s.LiveSave.fActive)
        {
            /* Live save: do a final scan and save whatever is still dirty. */
            pgmR3ScanRomPages(pVM);
            pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
            pgmR3ScanRamPages(pVM, true /*fFinalPass*/);

            rc = pgmR3SaveShadowedRomPages(    pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveMmio2Pages(      pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRamPages(        pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
        }
        else
        {
            /* Non-live save: write the range tables and all the pages now. */
            rc = pgmR3SaveRomRanges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRomVirginPages(  pVM, pSSM, false /*fLiveSave*/);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveMmio2Pages(      pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRamPages(        pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
        }
        SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
    }

#else /* !VBOX_WITH_LIVE_MIGRATION */
    /*
     * Ram ranges and the memory they describe.
     */
    i = 0;
    for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; pRam; pRam = pRam->pNextR3, i++)
    {
        /*
         * Save the ram range details.
         */
        SSMR3PutU32(pSSM, i);
        SSMR3PutGCPhys(pSSM, pRam->GCPhys);
        SSMR3PutGCPhys(pSSM, pRam->GCPhysLast);
        SSMR3PutGCPhys(pSSM, pRam->cb);
        SSMR3PutU8(pSSM, !!pRam->pvR3);      /* Boolean indicating memory or not. */
        SSMR3PutStrZ(pSSM, pRam->pszDesc);   /* This is the best unique id we have... */

        /*
         * Iterate the pages, only two special cases.
         */
        uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PPGMPAGE pPage      = &pRam->aPages[iPage];
            uint8_t  uType      = PGM_PAGE_GET_TYPE(pPage);

            if (uType == PGMPAGETYPE_ROM_SHADOW) /** @todo This isn't right, but it doesn't currently matter. */
                rc = pgmR3SaveShadowedRomPage(pVM, pSSM, pPage, GCPhysPage, pRam);
            else if (uType == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
            {
                /* MMIO2 alias -> MMIO; the device will just have to deal with this. */
                SSMR3PutU8(pSSM, PGMPAGETYPE_MMIO);
                rc = SSMR3PutU8(pSSM, 0 /* ZERO */);
            }
            else
            {
                SSMR3PutU8(pSSM, uType);
                rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhysPage, pRam);
            }
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }

    rc = SSMR3PutU32(pSSM, ~0); /* terminator. */
#endif /* !VBOX_WITH_LIVE_MIGRATION */

    pgmUnlock(pVM);
    return rc;
}
1832
1833
1834/**
1835 * Cleans up after an save state operation.
1836 *
1837 * @returns VBox status code.
1838 * @param pVM VM Handle.
1839 * @param pSSM SSM operation handle.
1840 */
1841static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
1842{
1843 /*
1844 * Do per page type cleanups first.
1845 */
1846 if (pVM->pgm.s.LiveSave.fActive)
1847 {
1848 pgmR3DoneRomPages(pVM);
1849 pgmR3DoneMmio2Pages(pVM);
1850 pgmR3DoneRamPages(pVM);
1851 }
1852
1853 /*
1854 * Clear the live save indicator and disengage write monitoring.
1855 */
1856 pgmLock(pVM);
1857 pVM->pgm.s.LiveSave.fActive = false;
1858 /** @todo this is blindly assuming that we're the only user of write
1859 * monitoring. Fix this when more users are added. */
1860 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
1861 pgmUnlock(pVM);
1862
1863 return VINF_SUCCESS;
1864}
1865
1866
1867/**
1868 * Prepare state load operation.
1869 *
1870 * @returns VBox status code.
1871 * @param pVM VM Handle.
1872 * @param pSSM SSM operation handle.
1873 */
1874static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
1875{
1876 /*
1877 * Call the reset function to make sure all the memory is cleared.
1878 */
1879 PGMR3Reset(pVM);
1880 pVM->pgm.s.LiveSave.fActive = false;
1881 NOREF(pSSM);
1882 return VINF_SUCCESS;
1883}
1884
1885
1886/**
1887 * Load an ignored page.
1888 *
1889 * @returns VBox status code.
1890 * @param pSSM The saved state handle.
1891 */
1892static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
1893{
1894 uint8_t abPage[PAGE_SIZE];
1895 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
1896}
1897
1898
1899/**
1900 * Loads a page without any bits in the saved state, i.e. making sure it's
1901 * really zero.
1902 *
1903 * @returns VBox status code.
1904 * @param pVM The VM handle.
1905 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
1906 * state).
1907 * @param pPage The guest page tracking structure.
1908 * @param GCPhys The page address.
1909 * @param pRam The ram range (logging).
1910 */
1911static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1912{
1913 if ( PGM_PAGE_GET_TYPE(pPage) != uType
1914 && uType != PGMPAGETYPE_INVALID)
1915 return VERR_SSM_UNEXPECTED_DATA;
1916
1917 /* I think this should be sufficient. */
1918 if (!PGM_PAGE_IS_ZERO(pPage))
1919 return VERR_SSM_UNEXPECTED_DATA;
1920
1921 NOREF(pVM);
1922 NOREF(GCPhys);
1923 NOREF(pRam);
1924 return VINF_SUCCESS;
1925}
1926
1927
1928/**
1929 * Loads a page from the saved state.
1930 *
1931 * @returns VBox status code.
1932 * @param pVM The VM handle.
1933 * @param pSSM The SSM handle.
1934 * @param uType The page type or PGMPAGETYEP_INVALID (old saved
1935 * state).
1936 * @param pPage The guest page tracking structure.
1937 * @param GCPhys The page address.
1938 * @param pRam The ram range (logging).
1939 */
1940static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1941{
1942 /*
1943 * Match up the type, dealing with MMIO2 aliases (dropped).
1944 */
1945 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == uType
1946 || uType == PGMPAGETYPE_INVALID,
1947 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
1948 VERR_SSM_UNEXPECTED_DATA);
1949
1950 /*
1951 * Load the page.
1952 */
1953 void *pvPage;
1954 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
1955 if (RT_SUCCESS(rc))
1956 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
1957
1958 return rc;
1959}
1960
1961
1962/**
1963 * Loads a page (counter part to pgmR3SavePage).
1964 *
1965 * @returns VBox status code, fully bitched errors.
1966 * @param pVM The VM handle.
1967 * @param pSSM The SSM handle.
1968 * @param uType The page type.
1969 * @param pPage The page.
1970 * @param GCPhys The page address.
1971 * @param pRam The RAM range (for error messages).
1972 */
1973static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1974{
1975 uint8_t uState;
1976 int rc = SSMR3GetU8(pSSM, &uState);
1977 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
1978 if (uState == 0 /* zero */)
1979 rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
1980 else if (uState == 1)
1981 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
1982 else
1983 rc = VERR_INTERNAL_ERROR;
1984 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
1985 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
1986 rc);
1987 return VINF_SUCCESS;
1988}
1989
1990
/**
 * Loads a shadowed ROM page.
 *
 * @returns VBox status code, errors are fully bitched.
 * @param   pVM     The VM handle.
 * @param   pSSM    The saved state handle.
 * @param   pPage   The page.
 * @param   GCPhys  The page address.
 * @param   pRam    The RAM range (for error messages).
 */
static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /*
     * Load and set the protection first, then load the two pages, the first
     * one is the active the other is the passive.
     */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);

    /* Read and validate the saved protection mode. */
    uint8_t     uProt;
    int rc = SSMR3GetU8(pSSM, &uProt);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
    PGMROMPROT  enmProt = (PGMROMPROT)uProt;
    AssertLogRelMsgReturn(    enmProt >= PGMROMPROT_INVALID
                          &&  enmProt <  PGMROMPROT_END,
                          ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    /* Switch the protection if it differs from the current one. */
    if (pRomPage->enmProt != enmProt)
    {
        rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
    }

    /* Work out which copy is currently mapped (active) and which isn't. */
    PPGMPAGE pPageActive  = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin      : &pRomPage->Shadow;
    PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow      : &pRomPage->Virgin;
    uint8_t  u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM        : PGMPAGETYPE_ROM_SHADOW;
    uint8_t  u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;

    /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
     *        used down the line (will the 2nd page will be written to the first
     *        one because of a false TLB hit since the TLB is using GCPhys and
     *        doesn't check the HCPhys of the desired page). */
    rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
    if (RT_SUCCESS(rc))
    {
        /* Mirror the loaded active page into the ROM page copy before loading
           the passive one. */
        *pPageActive = *pPage;
        rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
    }
    return rc;
}
2043
2044/**
2045 * Ram range flags and bits for older versions of the saved state.
2046 *
2047 * @returns VBox status code.
2048 *
2049 * @param pVM The VM handle
2050 * @param pSSM The SSM handle.
2051 * @param uVersion The saved state version.
2052 */
2053static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2054{
2055 PPGM pPGM = &pVM->pgm.s;
2056
2057 /*
2058 * Ram range flags and bits.
2059 */
2060 uint32_t i = 0;
2061 for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
2062 {
2063 /* Check the seqence number / separator. */
2064 uint32_t u32Sep;
2065 int rc = SSMR3GetU32(pSSM, &u32Sep);
2066 if (RT_FAILURE(rc))
2067 return rc;
2068 if (u32Sep == ~0U)
2069 break;
2070 if (u32Sep != i)
2071 {
2072 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2073 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2074 }
2075 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2076
2077 /* Get the range details. */
2078 RTGCPHYS GCPhys;
2079 SSMR3GetGCPhys(pSSM, &GCPhys);
2080 RTGCPHYS GCPhysLast;
2081 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2082 RTGCPHYS cb;
2083 SSMR3GetGCPhys(pSSM, &cb);
2084 uint8_t fHaveBits;
2085 rc = SSMR3GetU8(pSSM, &fHaveBits);
2086 if (RT_FAILURE(rc))
2087 return rc;
2088 if (fHaveBits & ~1)
2089 {
2090 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2091 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2092 }
2093 size_t cchDesc = 0;
2094 char szDesc[256];
2095 szDesc[0] = '\0';
2096 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2097 {
2098 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2099 if (RT_FAILURE(rc))
2100 return rc;
2101 /* Since we've modified the description strings in r45878, only compare
2102 them if the saved state is more recent. */
2103 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2104 cchDesc = strlen(szDesc);
2105 }
2106
2107 /*
2108 * Match it up with the current range.
2109 *
2110 * Note there is a hack for dealing with the high BIOS mapping
2111 * in the old saved state format, this means we might not have
2112 * a 1:1 match on success.
2113 */
2114 if ( ( GCPhys != pRam->GCPhys
2115 || GCPhysLast != pRam->GCPhysLast
2116 || cb != pRam->cb
2117 || ( cchDesc
2118 && strcmp(szDesc, pRam->pszDesc)) )
2119 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2120 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2121 || GCPhys != UINT32_C(0xfff80000)
2122 || GCPhysLast != UINT32_C(0xffffffff)
2123 || pRam->GCPhysLast != GCPhysLast
2124 || pRam->GCPhys < GCPhys
2125 || !fHaveBits)
2126 )
2127 {
2128 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2129 "State : %RGp-%RGp %RGp bytes %s %s\n",
2130 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2131 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2132 /*
2133 * If we're loading a state for debugging purpose, don't make a fuss if
2134 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2135 */
2136 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2137 || GCPhys < 8 * _1M)
2138 AssertFailedReturn(VERR_SSM_LOAD_CONFIG_MISMATCH);
2139
2140 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2141 continue;
2142 }
2143
2144 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
2145 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2146 {
2147 /*
2148 * Load the pages one by one.
2149 */
2150 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2151 {
2152 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2153 PPGMPAGE pPage = &pRam->aPages[iPage];
2154 uint8_t uType;
2155 rc = SSMR3GetU8(pSSM, &uType);
2156 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2157 if (uType == PGMPAGETYPE_ROM_SHADOW)
2158 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2159 else
2160 rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
2161 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2162 }
2163 }
2164 else
2165 {
2166 /*
2167 * Old format.
2168 */
2169 AssertLogRelReturn(!pVM->pgm.s.fRamPreAlloc, VERR_NOT_SUPPORTED); /* can't be detected. */
2170
2171 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2172 The rest is generally irrelevant and wrong since the stuff have to match registrations. */
2173 uint32_t fFlags = 0;
2174 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2175 {
2176 uint16_t u16Flags;
2177 rc = SSMR3GetU16(pSSM, &u16Flags);
2178 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2179 fFlags |= u16Flags;
2180 }
2181
2182 /* Load the bits */
2183 if ( !fHaveBits
2184 && GCPhysLast < UINT32_C(0xe0000000))
2185 {
2186 /*
2187 * Dynamic chunks.
2188 */
2189 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
2190 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2191 ("cPages=%#x cPagesInChunk=%#x\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2192 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2193
2194 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2195 {
2196 uint8_t fPresent;
2197 rc = SSMR3GetU8(pSSM, &fPresent);
2198 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2199 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2200 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2201 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2202
2203 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2204 {
2205 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2206 PPGMPAGE pPage = &pRam->aPages[iPage];
2207 if (fPresent)
2208 {
2209 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
2210 rc = pgmR3LoadPageToDevNullOld(pSSM);
2211 else
2212 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2213 }
2214 else
2215 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2216 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2217 }
2218 }
2219 }
2220 else if (pRam->pvR3)
2221 {
2222 /*
2223 * MMIO2.
2224 */
2225 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2226 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2227 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2228 AssertLogRelMsgReturn(pRam->pvR3,
2229 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2230 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2231
2232 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2233 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2234 }
2235 else if (GCPhysLast < UINT32_C(0xfff80000))
2236 {
2237 /*
2238 * PCI MMIO, no pages saved.
2239 */
2240 }
2241 else
2242 {
2243 /*
2244 * Load the 0xfff80000..0xffffffff BIOS range.
2245 * It starts with X reserved pages that we have to skip over since
2246 * the RAMRANGE create by the new code won't include those.
2247 */
2248 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2249 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2250 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2251 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2252 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2253 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2254 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2255
2256 /* Skip wasted reserved pages before the ROM. */
2257 while (GCPhys < pRam->GCPhys)
2258 {
2259 rc = pgmR3LoadPageToDevNullOld(pSSM);
2260 GCPhys += PAGE_SIZE;
2261 }
2262
2263 /* Load the bios pages. */
2264 cPages = pRam->cb >> PAGE_SHIFT;
2265 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2266 {
2267 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2268 PPGMPAGE pPage = &pRam->aPages[iPage];
2269
2270 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2271 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2272 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2273 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2274 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2275 }
2276 }
2277 }
2278 }
2279
2280 return VINF_SUCCESS;
2281}
2282
2283
/**
 * Worker for pgmR3Load and pgmR3LoadFinalLocked that loads the page records
 * (RAM, MMIO2 and ROM pages) of the current (post 3.0.0) saved state format.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   uPass               The data pass (SSM_PASS_FINAL for the final
 *                              pass).  Note: not referenced by the record
 *                              loop below; all record types are handled the
 *                              same way in every pass.
 *
 * @todo    This needs splitting up if more record types or code twists are
 *          added...
 */
static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    /*
     * Process page records until we hit the terminator.
     *
     * Cursor state is carried between records: GCPhys for RAM records and
     * (id, iPage) for MMIO2/ROM records.  A record without the
     * PGM_STATE_REC_FLAG_ADDR flag implicitly continues at the page
     * following the previous record of the same family.
     */
    RTGCPHYS        GCPhys   = NIL_RTGCPHYS;
    PPGMRAMRANGE    pRamHint = NULL;
    uint8_t         id       = UINT8_MAX;       /* current MMIO2/ROM range id; UINT8_MAX = no cached range */
    uint32_t        iPage    = UINT32_MAX - 10; /* deliberately out of range so an implicit-addr first record asserts */
    PPGMROMRANGE    pRom     = NULL;
    PPGMMMIO2RANGE  pMmio2   = NULL;
    for (;;)
    {
        /*
         * Get the record type and flags.
         */
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
            return VINF_SUCCESS;
        AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
        {
            /*
             * RAM page.
             */
            case PGM_STATE_REC_RAM_ZERO:
            case PGM_STATE_REC_RAM_RAW:
            {
                /*
                 * Get the address and resolve it into a page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    GCPhys += PAGE_SIZE;    /* implicit address: page after the previous RAM record */
                else
                {
                    rc = SSMR3GetGCPhys(pSSM, &GCPhys);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                /* The address must be page aligned. */
                AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                PPGMPAGE pPage;
                rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
                AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);

                /*
                 * Take action according to the record type.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_RAM_ZERO:
                    {
                        if (PGM_PAGE_IS_ZERO(pPage))
                            break;  /* already the shared zero page, nothing to do */
                        /** @todo implement zero page replacing. */
                        AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
                        void *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        ASMMemZeroPage(pvDstPage);
                        break;
                    }

                    case PGM_STATE_REC_RAM_RAW:
                    {
                        /* Map the page writable and read the full page from the stream. */
                        void *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                    }

                    default:
                        AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
                }
                id = UINT8_MAX;     /* invalidate the MMIO2/ROM range cursor */
                break;
            }

            /*
             * MMIO2 page.
             */
            case PGM_STATE_REC_MMIO2_RAW:
            case PGM_STATE_REC_MMIO2_ZERO:
            {
                /*
                 * Get the ID + page number and resolved that into a MMIO2 page.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;        /* implicit address: next page in the same MMIO2 range */
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                if (    !pMmio2
                    ||  pMmio2->idSavedState != id)
                {
                    /* Cursor miss: look the range up by its saved-state id. */
                    for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
                        if (pMmio2->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pMmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
                }
                AssertLogRelMsgReturn(iPage < (pMmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_INTERNAL_ERROR);
                void *pvDstPage = (uint8_t *)pMmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);

                /*
                 * Load the page bits.
                 */
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
                    ASMMemZeroPage(pvDstPage);
                else
                {
                    rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                GCPhys = NIL_RTGCPHYS;  /* invalidate the RAM cursor */
                break;
            }

            /*
             * ROM pages.
             */
            case PGM_STATE_REC_ROM_VIRGIN:
            case PGM_STATE_REC_ROM_SHW_RAW:
            case PGM_STATE_REC_ROM_SHW_ZERO:
            case PGM_STATE_REC_ROM_PROT:
            {
                /*
                 * Get the ID + page number and resolved that into a ROM page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;        /* implicit address: next page in the same ROM range */
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                if (    !pRom
                    ||  pRom->idSavedState != id)
                {
                    /* Cursor miss: look the range up by its saved-state id. */
                    for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                        if (pRom->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
                }
                AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_INTERNAL_ERROR);
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);

                /*
                 * Get and set the protection.
                 */
                uint8_t u8Prot;
                rc = SSMR3GetU8(pSSM, &u8Prot);
                if (RT_FAILURE(rc))
                    return rc;
                PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
                AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_INTERNAL_ERROR);

                if (enmProt != pRomPage->enmProt)
                {
                    /* Only shadowed ROM ranges can change protection. */
                    AssertLogRelMsgReturn(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED,
                                          ("GCPhys=%RGp enmProt=%d %s\n", GCPhys, enmProt, pRom->pszDesc),
                                          VERR_SSM_LOAD_CONFIG_MISMATCH);
                    rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
                    AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                    AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
                }
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
                    break; /* done - a PROT record carries no page data */

                /*
                 * Get the right page descriptor.
                 *
                 * pRealPage is left NULL when the saved page is the one
                 * currently mapped at GCPhys; it is then looked up through
                 * the RAM range below.
                 */
                PPGMPAGE pRealPage;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_VIRGIN:
                        if (!PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Virgin;
                        else
                            pRealPage = NULL;
                        break;

                    case PGM_STATE_REC_ROM_SHW_RAW:
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        AssertLogRelMsgReturn(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED,
                                              ("GCPhys=%RGp enmProt=%d %s\n", GCPhys, enmProt, pRom->pszDesc),
                                              VERR_SSM_LOAD_CONFIG_MISMATCH);
                        if (PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Shadow;
                        else
                            pRealPage = NULL;
                        break;

                    default: AssertLogRelFailedReturn(VERR_INTERNAL_ERROR); /* shut up gcc */
                }
                if (!pRealPage)
                {
                    rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pRealPage, &pRamHint);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
                }

                /*
                 * Make it writable and map it (if necessary).
                 *
                 * pvDstPage stays NULL for a SHW_ZERO record hitting a page
                 * that is already the zero page.
                 */
                void *pvDstPage = NULL;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (PGM_PAGE_IS_ZERO(pRealPage))
                            break;
                        /** @todo implement zero page replacing. */
                        /* fall thru */
                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                    {
                        rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                        break;
                    }
                }

                /*
                 * Load the bits.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (pvDstPage)
                            ASMMemZeroPage(pvDstPage);
                        break;

                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                }
                GCPhys = NIL_RTGCPHYS;  /* invalidate the RAM cursor */
                break;
            }

            /*
             * Unknown type.
             */
            default:
                AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
        }
    } /* forever */
}
2559
2560
2561/**
2562 * Worker for pgmR3Load.
2563 *
2564 * @returns VBox status code.
2565 *
2566 * @param pVM The VM handle.
2567 * @param pSSM The SSM handle.
2568 * @param uVersion The saved state version.
2569 */
2570static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2571{
2572 PPGM pPGM = &pVM->pgm.s;
2573 int rc;
2574 uint32_t u32Sep;
2575
2576 /*
2577 * Load basic data (required / unaffected by relocation).
2578 */
2579 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2580 {
2581 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2582 AssertLogRelRCReturn(rc, rc);
2583
2584 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2585 {
2586 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
2587 AssertLogRelRCReturn(rc, rc);
2588 }
2589 }
2590 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2591 {
2592 AssertRelease(pVM->cCpus == 1);
2593
2594 PGMOLD pgmOld;
2595 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2596 AssertLogRelRCReturn(rc, rc);
2597
2598 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
2599 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
2600 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
2601
2602 pVM->aCpus[0].pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2603 pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2604 pVM->aCpus[0].pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2605 }
2606 else
2607 {
2608 AssertRelease(pVM->cCpus == 1);
2609
2610 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
2611 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
2612 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
2613
2614 uint32_t cbRamSizeIgnored;
2615 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
2616 if (RT_FAILURE(rc))
2617 return rc;
2618 SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
2619
2620 uint32_t u32 = 0;
2621 SSMR3GetUInt(pSSM, &u32);
2622 pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
2623 SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
2624 RTUINT uGuestMode;
2625 SSMR3GetUInt(pSSM, &uGuestMode);
2626 pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
2627
2628 /* check separator. */
2629 SSMR3GetU32(pSSM, &u32Sep);
2630 if (RT_FAILURE(rc))
2631 return rc;
2632 if (u32Sep != (uint32_t)~0)
2633 {
2634 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
2635 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2636 }
2637 }
2638
2639 /*
2640 * The guest mappings.
2641 */
2642 uint32_t i = 0;
2643 for (;; i++)
2644 {
2645 /* Check the seqence number / separator. */
2646 rc = SSMR3GetU32(pSSM, &u32Sep);
2647 if (RT_FAILURE(rc))
2648 return rc;
2649 if (u32Sep == ~0U)
2650 break;
2651 if (u32Sep != i)
2652 {
2653 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2654 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2655 }
2656
2657 /* get the mapping details. */
2658 char szDesc[256];
2659 szDesc[0] = '\0';
2660 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2661 if (RT_FAILURE(rc))
2662 return rc;
2663 RTGCPTR GCPtr;
2664 SSMR3GetGCPtr(pSSM, &GCPtr);
2665 RTGCPTR cPTs;
2666 rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
2667 if (RT_FAILURE(rc))
2668 return rc;
2669
2670 /* find matching range. */
2671 PPGMMAPPING pMapping;
2672 for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
2673 if ( pMapping->cPTs == cPTs
2674 && !strcmp(pMapping->pszDesc, szDesc))
2675 break;
2676 AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
2677 cPTs, szDesc, GCPtr),
2678 VERR_SSM_LOAD_CONFIG_MISMATCH);
2679
2680 /* relocate it. */
2681 if (pMapping->GCPtr != GCPtr)
2682 {
2683 AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
2684 pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
2685 }
2686 else
2687 Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
2688 }
2689
2690 /*
2691 * Load the RAM contents.
2692 */
2693 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
2694 {
2695 if (!pVM->pgm.s.LiveSave.fActive)
2696 {
2697 rc = pgmR3LoadRomRanges(pVM, pSSM);
2698 if (RT_FAILURE(rc))
2699 return rc;
2700 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
2701 if (RT_FAILURE(rc))
2702 return rc;
2703 }
2704
2705 return pgmR3LoadMemory(pVM, pSSM, SSM_PASS_FINAL);
2706 }
2707 return pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
2708}
2709
2710
2711/**
2712 * Execute state load operation.
2713 *
2714 * @returns VBox status code.
2715 * @param pVM VM Handle.
2716 * @param pSSM SSM operation handle.
2717 * @param uVersion Data layout version.
2718 * @param uPass The data pass.
2719 */
2720static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2721{
2722 int rc;
2723 PPGM pPGM = &pVM->pgm.s;
2724
2725 /*
2726 * Validate version.
2727 */
2728 if ( ( uPass != SSM_PASS_FINAL
2729 && uVersion != PGM_SAVED_STATE_VERSION)
2730 || ( uVersion != PGM_SAVED_STATE_VERSION
2731 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
2732 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
2733 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
2734 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
2735 )
2736 {
2737 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
2738 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2739 }
2740
2741 /*
2742 * Do the loading while owning the lock because a bunch of the functions
2743 * we're using requires this.
2744 */
2745 if (uPass != SSM_PASS_FINAL)
2746 {
2747 pgmLock(pVM);
2748 if (uPass != 0)
2749 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
2750 else
2751 {
2752 pVM->pgm.s.LiveSave.fActive = true;
2753 rc = pgmR3LoadRomRanges(pVM, pSSM);
2754 if (RT_SUCCESS(rc))
2755 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
2756 if (RT_SUCCESS(rc))
2757 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
2758 }
2759 pgmUnlock(pVM);
2760 }
2761 else
2762 {
2763 pgmLock(pVM);
2764 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
2765 pVM->pgm.s.LiveSave.fActive = false;
2766 pgmUnlock(pVM);
2767 if (RT_SUCCESS(rc))
2768 {
2769 /*
2770 * We require a full resync now.
2771 */
2772 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2773 {
2774 PVMCPU pVCpu = &pVM->aCpus[i];
2775 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2776 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2777
2778 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
2779 }
2780
2781 pgmR3HandlerPhysicalUpdateAll(pVM);
2782
2783 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2784 {
2785 PVMCPU pVCpu = &pVM->aCpus[i];
2786
2787 /*
2788 * Change the paging mode.
2789 */
2790 rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
2791
2792 /* Restore pVM->pgm.s.GCPhysCR3. */
2793 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
2794 RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
2795 if ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
2796 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
2797 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
2798 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
2799 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
2800 else
2801 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
2802 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2803 }
2804 }
2805 }
2806
2807 return rc;
2808}
2809
2810
2811/**
2812 * Registers the saved state callbacks with SSM.
2813 *
2814 * @returns VBox status code.
2815 * @param pVM Pointer to VM structure.
2816 * @param cbRam The RAM size.
2817 */
2818int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
2819{
2820 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
2821 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
2822 NULL, pgmR3SaveExec, pgmR3SaveDone,
2823 pgmR3LoadPrep, pgmR3Load, NULL);
2824}
2825
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette