VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@38990

Last change on this file since 38990 was 38953, checked in by vboxsync, 14 years ago

PGM: Attempt at fixing the VERR_MAP_FAILED during state save problem on 32-bit hosts when assigning lots of memory to the guest. PGM should lock down guest RAM pages before use and release them afterwards like everyone else. Still quite some stuff left to do there, so I've devised a little hack for tracking unlocked mappings and using this as input when deciding to do async or sync chunk unmapping at save/load time. See xtracker #5912 and public ticket 7929.

1/* $Id: PGMSavedState.cpp 38953 2011-10-06 08:49:36Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/ssm.h>
26#include <VBox/vmm/pdmdrv.h>
27#include <VBox/vmm/pdmdev.h>
28#include "PGMInternal.h"
29#include <VBox/vmm/vm.h>
30#include "PGMInline.h"
31
32#include <VBox/param.h>
33#include <VBox/err.h>
34#include <VBox/vmm/ftm.h>
35
36#include <iprt/asm.h>
37#include <iprt/assert.h>
38#include <iprt/crc.h>
39#include <iprt/mem.h>
40#include <iprt/sha.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
48/** Saved state data unit version. */
49#define PGM_SAVED_STATE_VERSION 14
50/** Saved state data unit version before the PAE PDPE registers. */
51#define PGM_SAVED_STATE_VERSION_PRE_PAE 13
52/** Saved state data unit version after this includes ballooned page flags in
53 * the state (see #5515). */
54#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
55/** Saved state before the balloon change. */
56#define PGM_SAVED_STATE_VERSION_PRE_BALLOON 11
57/** Saved state data unit version used during 3.1 development, misses the RAM
58 * config. */
59#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG 10
60/** Saved state data unit version for 3.0 (pre teleportation). */
61#define PGM_SAVED_STATE_VERSION_3_0_0 9
62/** Saved state data unit version for 2.2.2 and later. */
63#define PGM_SAVED_STATE_VERSION_2_2_2 8
64/** Saved state data unit version for 2.2.0. */
65#define PGM_SAVED_STATE_VERSION_RR_DESC 7
66/** Saved state data unit version. */
67#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
68
69
70/** @name Sparse state record types
71 * @{ */
72/** Zero page. No data. */
73#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
74/** Raw page. */
75#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
76/** Raw MMIO2 page. */
77#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
78/** Zero MMIO2 page. */
79#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
80/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
81#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
82/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
83#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
84/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
85#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
86/** ROM protection (8-bit). */
87#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
88/** Ballooned page. No data. */
89#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
90/** The last record type. */
91#define PGM_STATE_REC_LAST PGM_STATE_REC_RAM_BALLOONED
92/** End marker. */
93#define PGM_STATE_REC_END UINT8_C(0xff)
94/** Flag indicating that the data is preceded by the page address.
 95 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
96 * range ID and a 32-bit page index.
97 */
98#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
99/** @} */
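/*
 * Editorial sketch, not part of the original file: how a consumer might walk
 * the sparse record stream defined above. The record types, the ADDR flag
 * layout and the SSMR3Get / SSMR3Skip APIs are the real ones used elsewhere
 * in this file; the helper itself is illustrative only and elides all real
 * error handling beyond the status codes.
 */
#if 0 /* illustration only */
static int pgmR3SketchWalkRecords(PSSMHANDLE pSSM)
{
    for (;;)
    {
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
            return VINF_SUCCESS;

        /* Bit 7 says an address prefix follows; the low bits select the type. */
        uint8_t const uType = (uint8_t)(u8 & ~PGM_STATE_REC_FLAG_ADDR);
        if (u8 & PGM_STATE_REC_FLAG_ADDR)
        {
            if (   uType == PGM_STATE_REC_RAM_ZERO
                || uType == PGM_STATE_REC_RAM_RAW
                || uType == PGM_STATE_REC_RAM_BALLOONED)
            {
                RTGCPHYS GCPhys;
                SSMR3GetGCPhys(pSSM, &GCPhys);  /* RAM records carry a full guest physical address. */
            }
            else
            {
                uint8_t  idRange;
                uint32_t iPage;
                SSMR3GetU8(pSSM, &idRange);     /* ROM/MMIO2 records carry a range ID... */
                SSMR3GetU32(pSSM, &iPage);      /* ...and a page index within that range. */
            }
        }
        /* Otherwise the record applies to the page right after the previous one. */

        switch (uType)
        {
            case PGM_STATE_REC_RAM_ZERO:        /* no payload */
            case PGM_STATE_REC_RAM_BALLOONED:
            case PGM_STATE_REC_MMIO2_ZERO:
                break;
            case PGM_STATE_REC_RAM_RAW:         /* raw page bits */
            case PGM_STATE_REC_MMIO2_RAW:
                rc = SSMR3Skip(pSSM, PAGE_SIZE);
                break;
            case PGM_STATE_REC_ROM_VIRGIN:      /* protection byte + raw page bits */
            case PGM_STATE_REC_ROM_SHW_RAW:
                rc = SSMR3Skip(pSSM, 1 + PAGE_SIZE);
                break;
            case PGM_STATE_REC_ROM_SHW_ZERO:    /* protection byte only */
            case PGM_STATE_REC_ROM_PROT:
                rc = SSMR3Skip(pSSM, 1);
                break;
            default:
                return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        if (RT_FAILURE(rc))
            return rc;
    }
}
#endif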
100
101/** The CRC-32 for a zero page. */
102#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
103/** The CRC-32 for a zero half page. */
104#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
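/*
 * Editorial sketch, not part of the original file: the two constants above
 * are the CRC-32 of PAGE_SIZE and PAGE_SIZE/2 zero bytes respectively
 * (assuming the usual 4 KiB page), which a debug build could re-verify:
 */
#if 0 /* illustration only */
static void pgmR3SketchCheckZeroPageCrcs(void)
{
    static uint8_t const s_abZeroPg[PAGE_SIZE] = { 0 };
    Assert(RTCrc32(s_abZeroPg, PAGE_SIZE)     == PGM_STATE_CRC32_ZERO_PAGE);
    Assert(RTCrc32(s_abZeroPg, PAGE_SIZE / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE);
}
#endif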
105
106
107/*******************************************************************************
108* Structures and Typedefs *
109*******************************************************************************/
110/** For loading old saved states. (pre-smp) */
111typedef struct
112{
113 /** If set no conflict checks are required. (boolean) */
114 bool fMappingsFixed;
115 /** Size of fixed mapping */
116 uint32_t cbMappingFixed;
117 /** Base address (GC) of fixed mapping */
118 RTGCPTR GCPtrMappingFixed;
119 /** A20 gate mask.
 120 * Our current approach to A20 emulation is to let REM do it and not bother
121 * anywhere else. The interesting guests will be operating with it enabled anyway.
122 * But should the need arise, we'll subject physical addresses to this mask. */
123 RTGCPHYS GCPhysA20Mask;
124 /** A20 gate state - boolean! */
125 bool fA20Enabled;
126 /** The guest paging mode. */
127 PGMMODE enmGuestMode;
128} PGMOLD;
129
130
131/*******************************************************************************
132* Global Variables *
133*******************************************************************************/
134/** PGM fields to save/load. */
135
136static const SSMFIELD s_aPGMFields[] =
137{
138 SSMFIELD_ENTRY( PGM, fMappingsFixed),
139 SSMFIELD_ENTRY_GCPTR( PGM, GCPtrMappingFixed),
140 SSMFIELD_ENTRY( PGM, cbMappingFixed),
141 SSMFIELD_ENTRY( PGM, cBalloonedPages),
142 SSMFIELD_ENTRY_TERM()
143};
144
145static const SSMFIELD s_aPGMFieldsPreBalloon[] =
146{
147 SSMFIELD_ENTRY( PGM, fMappingsFixed),
148 SSMFIELD_ENTRY_GCPTR( PGM, GCPtrMappingFixed),
149 SSMFIELD_ENTRY( PGM, cbMappingFixed),
150 SSMFIELD_ENTRY_TERM()
151};
152
153static const SSMFIELD s_aPGMCpuFields[] =
154{
155 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
156 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
157 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
158 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[0]),
159 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[1]),
160 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[2]),
161 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[3]),
162 SSMFIELD_ENTRY_TERM()
163};
164
165static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
166{
167 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
168 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
169 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
170 SSMFIELD_ENTRY_TERM()
171};
172
173static const SSMFIELD s_aPGMFields_Old[] =
174{
175 SSMFIELD_ENTRY( PGMOLD, fMappingsFixed),
176 SSMFIELD_ENTRY_GCPTR( PGMOLD, GCPtrMappingFixed),
177 SSMFIELD_ENTRY( PGMOLD, cbMappingFixed),
178 SSMFIELD_ENTRY( PGMOLD, fA20Enabled),
179 SSMFIELD_ENTRY_GCPHYS( PGMOLD, GCPhysA20Mask),
180 SSMFIELD_ENTRY( PGMOLD, enmGuestMode),
181 SSMFIELD_ENTRY_TERM()
182};
183
184
185/**
186 * Find the ROM tracking structure for the given page.
187 *
 188 * @returns Pointer to the ROM page structure. NULL if the page at GCPhys is
 189 * not a ROM page (the caller is expected to have checked that).
190 * @param pVM The VM handle.
191 * @param GCPhys The address of the ROM page.
192 */
193static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
194{
195 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
196 pRomRange;
197 pRomRange = pRomRange->CTX_SUFF(pNext))
198 {
199 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
 200 if (off < pRomRange->cb)
201 return &pRomRange->aPages[off >> PAGE_SHIFT];
202 }
203 return NULL;
204}
205
206
207/**
208 * Prepares the ROM pages for a live save.
209 *
210 * @returns VBox status code.
211 * @param pVM The VM handle.
212 */
213static int pgmR3PrepRomPages(PVM pVM)
214{
215 /*
216 * Initialize the live save tracking in the ROM page descriptors.
217 */
218 pgmLock(pVM);
219 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
220 {
 221 PPGMRAMRANGE pRamHint = NULL;
222 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
223
224 for (uint32_t iPage = 0; iPage < cPages; iPage++)
225 {
226 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
227 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
228 pRom->aPages[iPage].LiveSave.fDirty = true;
229 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
230 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
231 {
232 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
233 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
234 else
235 {
236 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
237 PPGMPAGE pPage;
238 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
239 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
240 if (RT_SUCCESS(rc))
241 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
242 else
243 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
244 }
245 }
246 }
247
248 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
249 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
250 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
251 }
252 pgmUnlock(pVM);
253
254 return VINF_SUCCESS;
255}
256
257
258/**
259 * Assigns IDs to the ROM ranges and saves them.
260 *
261 * @returns VBox status code.
262 * @param pVM The VM handle.
263 * @param pSSM Saved state handle.
264 */
265static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
266{
267 pgmLock(pVM);
268 uint8_t id = 1;
269 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
270 {
271 pRom->idSavedState = id;
272 SSMR3PutU8(pSSM, id);
273 SSMR3PutStrZ(pSSM, ""); /* device name */
274 SSMR3PutU32(pSSM, 0); /* device instance */
275 SSMR3PutU8(pSSM, 0); /* region */
276 SSMR3PutStrZ(pSSM, pRom->pszDesc);
277 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
278 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
279 if (RT_FAILURE(rc))
280 break;
281 }
282 pgmUnlock(pVM);
283 return SSMR3PutU8(pSSM, UINT8_MAX);
284}
285
286
287/**
288 * Loads the ROM range ID assignments.
289 *
290 * @returns VBox status code.
291 *
292 * @param pVM The VM handle.
293 * @param pSSM The saved state handle.
294 */
295static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
296{
297 PGM_LOCK_ASSERT_OWNER(pVM);
298
299 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
300 pRom->idSavedState = UINT8_MAX;
301
302 for (;;)
303 {
304 /*
305 * Read the data.
306 */
307 uint8_t id;
308 int rc = SSMR3GetU8(pSSM, &id);
309 if (RT_FAILURE(rc))
310 return rc;
311 if (id == UINT8_MAX)
312 {
313 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
314 AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
315 ("The \"%s\" ROM was not found in the saved state. Probably due to some misconfiguration\n",
316 pRom->pszDesc));
317 return VINF_SUCCESS; /* the end */
318 }
319 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
320
321 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
322 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
323 AssertLogRelRCReturn(rc, rc);
324
325 uint32_t uInstance;
326 SSMR3GetU32(pSSM, &uInstance);
327 uint8_t iRegion;
328 SSMR3GetU8(pSSM, &iRegion);
329
330 char szDesc[64];
331 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
332 AssertLogRelRCReturn(rc, rc);
333
334 RTGCPHYS GCPhys;
335 SSMR3GetGCPhys(pSSM, &GCPhys);
336 RTGCPHYS cb;
337 rc = SSMR3GetGCPhys(pSSM, &cb);
338 if (RT_FAILURE(rc))
339 return rc;
340 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
341 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
342
343 /*
344 * Locate a matching ROM range.
345 */
346 AssertLogRelMsgReturn( uInstance == 0
347 && iRegion == 0
348 && szDevName[0] == '\0',
349 ("GCPhys=%RGp %s\n", GCPhys, szDesc),
350 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
351 PPGMROMRANGE pRom;
352 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
353 {
354 if ( pRom->idSavedState == UINT8_MAX
355 && !strcmp(pRom->pszDesc, szDesc))
356 {
357 pRom->idSavedState = id;
358 break;
359 }
360 }
361 if (!pRom)
362 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
363 } /* forever */
364}
365
366
367/**
368 * Scan ROM pages.
369 *
370 * @param pVM The VM handle.
371 */
372static void pgmR3ScanRomPages(PVM pVM)
373{
374 /*
375 * The shadow ROMs.
376 */
377 pgmLock(pVM);
378 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
379 {
380 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
381 {
382 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
383 for (uint32_t iPage = 0; iPage < cPages; iPage++)
384 {
385 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
386 if (pRomPage->LiveSave.fWrittenTo)
387 {
388 pRomPage->LiveSave.fWrittenTo = false;
389 if (!pRomPage->LiveSave.fDirty)
390 {
391 pRomPage->LiveSave.fDirty = true;
392 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
393 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
394 }
395 pRomPage->LiveSave.fDirtiedRecently = true;
396 }
397 else
398 pRomPage->LiveSave.fDirtiedRecently = false;
399 }
400 }
401 }
402 pgmUnlock(pVM);
403}
404
405
406/**
407 * Takes care of the virgin ROM pages in the first pass.
408 *
409 * This is an attempt at simplifying the handling of ROM pages a little bit.
410 * This ASSUMES that no new ROM ranges will be added and that they won't be
411 * relinked in any way.
412 *
413 * @param pVM The VM handle.
414 * @param pSSM The SSM handle.
415 * @param fLiveSave Whether we're in a live save or not.
416 */
417static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
418{
419 if (FTMIsDeltaLoadSaveActive(pVM))
420 return VINF_SUCCESS; /* nothing to do as nothing has changed here */
421
422 pgmLock(pVM);
423 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
424 {
425 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
426 for (uint32_t iPage = 0; iPage < cPages; iPage++)
427 {
428 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
429 PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;
430
431 /* Get the virgin page descriptor. */
432 PPGMPAGE pPage;
433 if (PGMROMPROT_IS_ROM(enmProt))
434 pPage = pgmPhysGetPage(pVM, GCPhys);
435 else
436 pPage = &pRom->aPages[iPage].Virgin;
437
438 /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
439 int rc = VINF_SUCCESS;
440 char abPage[PAGE_SIZE];
441 if ( !PGM_PAGE_IS_ZERO(pPage)
442 && !PGM_PAGE_IS_BALLOONED(pPage))
443 {
444 void const *pvPage;
445 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
446 if (RT_SUCCESS(rc))
447 memcpy(abPage, pvPage, PAGE_SIZE);
448 }
449 else
450 ASMMemZeroPage(abPage);
451 pgmUnlock(pVM);
452 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
453
454 /* Save it. */
455 if (iPage > 0)
456 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
457 else
458 {
459 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
460 SSMR3PutU8(pSSM, pRom->idSavedState);
461 SSMR3PutU32(pSSM, iPage);
462 }
463 SSMR3PutU8(pSSM, (uint8_t)enmProt);
464 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
465 if (RT_FAILURE(rc))
466 return rc;
467
468 /* Update state. */
469 pgmLock(pVM);
470 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
471 if (fLiveSave)
472 {
473 pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
474 pVM->pgm.s.LiveSave.Rom.cReadyPages++;
475 pVM->pgm.s.LiveSave.cSavedPages++;
476 }
477 }
478 }
479 pgmUnlock(pVM);
480 return VINF_SUCCESS;
481}
482
483
484/**
485 * Saves dirty pages in the shadowed ROM ranges.
486 *
487 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
488 *
489 * @returns VBox status code.
490 * @param pVM The VM handle.
491 * @param pSSM The SSM handle.
492 * @param fLiveSave Whether it's a live save or not.
493 * @param fFinalPass Whether this is the final pass or not.
494 */
495static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
496{
497 if (FTMIsDeltaLoadSaveActive(pVM))
498 return VINF_SUCCESS; /* nothing to do as we deal with those pages separately */
499
500 /*
501 * The Shadowed ROMs.
502 *
503 * ASSUMES that the ROM ranges are fixed.
504 * ASSUMES that all the ROM ranges are mapped.
505 */
506 pgmLock(pVM);
507 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
508 {
509 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
510 {
511 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
512 uint32_t iPrevPage = cPages;
513 for (uint32_t iPage = 0; iPage < cPages; iPage++)
514 {
515 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
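                /* Save the page when: not a live save (everything is saved), or the page
                   is dirty and has either quiesced (not written to or re-dirtied since
                   the last scan) or this is the final pass (all dirty pages must go). */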
516 if ( !fLiveSave
517 || ( pRomPage->LiveSave.fDirty
518 && ( ( !pRomPage->LiveSave.fDirtiedRecently
519 && !pRomPage->LiveSave.fWrittenTo)
520 || fFinalPass
521 )
522 )
523 )
524 {
525 uint8_t abPage[PAGE_SIZE];
526 PGMROMPROT enmProt = pRomPage->enmProt;
527 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
528 PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
529 bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
530 int rc = VINF_SUCCESS;
531 if (!fZero)
532 {
533 void const *pvPage;
534 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
535 if (RT_SUCCESS(rc))
536 memcpy(abPage, pvPage, PAGE_SIZE);
537 }
538 if (fLiveSave && RT_SUCCESS(rc))
539 {
540 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
541 pRomPage->LiveSave.fDirty = false;
542 pVM->pgm.s.LiveSave.Rom.cReadyPages++;
543 pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
544 pVM->pgm.s.LiveSave.cSavedPages++;
545 }
546 pgmUnlock(pVM);
547 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
548
549 if (iPage - 1U == iPrevPage && iPage > 0)
550 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
551 else
552 {
553 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
554 SSMR3PutU8(pSSM, pRom->idSavedState);
555 SSMR3PutU32(pSSM, iPage);
556 }
557 rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
558 if (!fZero)
559 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
560 if (RT_FAILURE(rc))
561 return rc;
562
563 pgmLock(pVM);
564 iPrevPage = iPage;
565 }
566 /*
567 * In the final pass, make sure the protection is in sync.
568 */
569 else if ( fFinalPass
570 && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
571 {
572 PGMROMPROT enmProt = pRomPage->enmProt;
573 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
574 pgmUnlock(pVM);
575
576 if (iPage - 1U == iPrevPage && iPage > 0)
577 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
578 else
579 {
580 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
581 SSMR3PutU8(pSSM, pRom->idSavedState);
582 SSMR3PutU32(pSSM, iPage);
583 }
584 int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
585 if (RT_FAILURE(rc))
586 return rc;
587
588 pgmLock(pVM);
589 iPrevPage = iPage;
590 }
591 }
592 }
593 }
594 pgmUnlock(pVM);
595 return VINF_SUCCESS;
596}
597
598
599/**
600 * Cleans up ROM pages after a live save.
601 *
602 * @param pVM The VM handle.
603 */
604static void pgmR3DoneRomPages(PVM pVM)
605{
606 NOREF(pVM);
607}
608
609
610/**
611 * Prepares the MMIO2 pages for a live save.
612 *
613 * @returns VBox status code.
614 * @param pVM The VM handle.
615 */
616static int pgmR3PrepMmio2Pages(PVM pVM)
617{
618 /*
619 * Initialize the live save tracking in the MMIO2 ranges.
620 * ASSUME nothing changes here.
621 */
622 pgmLock(pVM);
623 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
624 {
625 uint32_t const cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
626 pgmUnlock(pVM);
627
628 PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
629 if (!paLSPages)
630 return VERR_NO_MEMORY;
631 for (uint32_t iPage = 0; iPage < cPages; iPage++)
632 {
633 /* Initialize it as a dirty zero page. */
634 paLSPages[iPage].fDirty = true;
635 paLSPages[iPage].cUnchangedScans = 0;
636 paLSPages[iPage].fZero = true;
637 paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
638 paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
639 }
640
641 pgmLock(pVM);
642 pMmio2->paLSPages = paLSPages;
643 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
644 }
645 pgmUnlock(pVM);
646 return VINF_SUCCESS;
647}
648
649
650/**
651 * Assigns IDs to the MMIO2 ranges and saves them.
652 *
653 * @returns VBox status code.
654 * @param pVM The VM handle.
655 * @param pSSM Saved state handle.
656 */
657static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
658{
659 pgmLock(pVM);
660 uint8_t id = 1;
661 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++)
662 {
663 pMmio2->idSavedState = id;
664 SSMR3PutU8(pSSM, id);
665 SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pReg->szName);
666 SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance);
667 SSMR3PutU8(pSSM, pMmio2->iRegion);
668 SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc);
669 int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb);
670 if (RT_FAILURE(rc))
671 break;
672 }
673 pgmUnlock(pVM);
674 return SSMR3PutU8(pSSM, UINT8_MAX);
675}
676
677
678/**
679 * Loads the MMIO2 range ID assignments.
680 *
681 * @returns VBox status code.
682 *
683 * @param pVM The VM handle.
684 * @param pSSM The saved state handle.
685 */
686static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
687{
688 PGM_LOCK_ASSERT_OWNER(pVM);
689
690 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
691 pMmio2->idSavedState = UINT8_MAX;
692
693 for (;;)
694 {
695 /*
696 * Read the data.
697 */
698 uint8_t id;
699 int rc = SSMR3GetU8(pSSM, &id);
700 if (RT_FAILURE(rc))
701 return rc;
702 if (id == UINT8_MAX)
703 {
704 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
705 AssertLogRelMsg(pMmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));
706 return VINF_SUCCESS; /* the end */
707 }
708 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
709
710 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
711 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
712 AssertLogRelRCReturn(rc, rc);
713
714 uint32_t uInstance;
715 SSMR3GetU32(pSSM, &uInstance);
716 uint8_t iRegion;
717 SSMR3GetU8(pSSM, &iRegion);
718
719 char szDesc[64];
720 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
721 AssertLogRelRCReturn(rc, rc);
722
723 RTGCPHYS cb;
724 rc = SSMR3GetGCPhys(pSSM, &cb);
 725 AssertLogRelRCReturn(rc, rc); AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
726
727 /*
728 * Locate a matching MMIO2 range.
729 */
730 PPGMMMIO2RANGE pMmio2;
731 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
732 {
733 if ( pMmio2->idSavedState == UINT8_MAX
734 && pMmio2->iRegion == iRegion
735 && pMmio2->pDevInsR3->iInstance == uInstance
736 && !strcmp(pMmio2->pDevInsR3->pReg->szName, szDevName))
737 {
738 pMmio2->idSavedState = id;
739 break;
740 }
741 }
742 if (!pMmio2)
743 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
744 szDesc, szDevName, uInstance, iRegion);
745
746 /*
747 * Validate the configuration, the size of the MMIO2 region should be
748 * the same.
749 */
750 if (cb != pMmio2->RamRange.cb)
751 {
752 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
753 pMmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb));
754 if (cb > pMmio2->RamRange.cb) /* bad idea? */
755 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
756 pMmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb);
757 }
758 } /* forever */
759}
760
761
762/**
763 * Scans one MMIO2 page.
764 *
765 * @returns True if changed, false if unchanged.
766 *
767 * @param pVM The VM handle
768 * @param pbPage The page bits.
769 * @param pLSPage The live save tracking structure for the page.
770 *
771 */
772DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
773{
774 /*
775 * Special handling of zero pages.
776 */
777 bool const fZero = pLSPage->fZero;
778 if (fZero)
779 {
780 if (ASMMemIsZeroPage(pbPage))
781 {
782 /* Not modified. */
783 if (pLSPage->fDirty)
784 pLSPage->cUnchangedScans++;
785 return false;
786 }
787
788 pLSPage->fZero = false;
789 pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
790 }
791 else
792 {
793 /*
794 * CRC the first half, if it doesn't match the page is dirty and
795 * we won't check the 2nd half (we'll do that next time).
796 */
797 uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
798 if (u32CrcH1 == pLSPage->u32CrcH1)
799 {
800 uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
801 if (u32CrcH2 == pLSPage->u32CrcH2)
802 {
803 /* Probably not modified. */
804 if (pLSPage->fDirty)
805 pLSPage->cUnchangedScans++;
806 return false;
807 }
808
809 pLSPage->u32CrcH2 = u32CrcH2;
810 }
811 else
812 {
813 pLSPage->u32CrcH1 = u32CrcH1;
814 if ( u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
815 && ASMMemIsZeroPage(pbPage))
816 {
817 pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
818 pLSPage->fZero = true;
819 }
820 }
821 }
822
823 /* dirty page path */
824 pLSPage->cUnchangedScans = 0;
825 if (!pLSPage->fDirty)
826 {
827 pLSPage->fDirty = true;
828 pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
829 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
830 if (fZero)
831 pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
832 }
833 return true;
834}
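/*
 * Editorial note, not part of the original file: hashing the page in two
 * halves lets the scan above bail out on the first mismatching half, so a
 * page that is actively being written to costs roughly half a page of CRC
 * work, while a genuinely unchanged page still pays for both halves. Hitting
 * the zero-half constant also doubles as a cheap trigger for re-running
 * ASMMemIsZeroPage() so re-zeroed pages can return to the fZero fast path.
 */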
835
836
837/**
838 * Scan for MMIO2 page modifications.
839 *
840 * @param pVM The VM handle.
841 * @param uPass The pass number.
842 */
843static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
844{
845 /*
 846 * Since this is a bit expensive we lower the scan rate after pass 10 to every fourth pass, and leave the final pass to the save function.
847 */
848 if ( ( (uPass & 3) != 0
849 && uPass > 10)
850 || uPass == SSM_PASS_FINAL)
851 return;
852
853 pgmLock(pVM); /* paranoia */
854 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
855 {
856 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
857 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
858 pgmUnlock(pVM);
859
860 for (uint32_t iPage = 0; iPage < cPages; iPage++)
861 {
862 uint8_t const *pbPage = (uint8_t const *)pMmio2->pvR3 + iPage * PAGE_SIZE;
863 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
864 }
865
866 pgmLock(pVM);
867 }
868 pgmUnlock(pVM);
869
870}
871
872
873/**
874 * Save quiescent MMIO2 pages.
875 *
876 * @returns VBox status code.
877 * @param pVM The VM handle.
878 * @param pSSM The SSM handle.
879 * @param fLiveSave Whether it's a live save or not.
880 * @param uPass The pass number.
881 */
882static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
883{
884 /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
885 * device that we wish to know about changes.) */
886
887 int rc = VINF_SUCCESS;
888 if (uPass == SSM_PASS_FINAL)
889 {
890 /*
891 * The mop up round.
892 */
893 pgmLock(pVM);
894 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
895 pMmio2 && RT_SUCCESS(rc);
896 pMmio2 = pMmio2->pNextR3)
897 {
898 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
899 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3;
900 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
901 uint32_t iPageLast = cPages;
902 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
903 {
904 uint8_t u8Type;
905 if (!fLiveSave)
906 u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
907 else
908 {
 909 /* Try to figure out whether it's a clean page; compare the SHA-1 to be really sure. */
910 if ( !paLSPages[iPage].fDirty
911 && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
912 {
913 if (paLSPages[iPage].fZero)
914 continue;
915
916 uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
917 RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
918 if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
919 continue;
920 }
921 u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
922 pVM->pgm.s.LiveSave.cSavedPages++;
923 }
924
925 if (iPage != 0 && iPage == iPageLast + 1)
926 rc = SSMR3PutU8(pSSM, u8Type);
927 else
928 {
929 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
930 SSMR3PutU8(pSSM, pMmio2->idSavedState);
931 rc = SSMR3PutU32(pSSM, iPage);
932 }
933 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
934 rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
935 if (RT_FAILURE(rc))
936 break;
937 iPageLast = iPage;
938 }
939 }
940 pgmUnlock(pVM);
941 }
942 /*
943 * Reduce the rate after a little while since the current MMIO2 approach is
944 * a bit expensive.
945 * We position it two passes after the scan pass to avoid saving busy pages.
946 */
947 else if ( uPass <= 10
948 || (uPass & 3) == 2)
949 {
950 pgmLock(pVM);
951 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
952 pMmio2 && RT_SUCCESS(rc);
953 pMmio2 = pMmio2->pNextR3)
954 {
955 PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
956 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3;
957 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
958 uint32_t iPageLast = cPages;
959 pgmUnlock(pVM);
960
961 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
962 {
 963 /* Skip clean pages and pages which haven't quiesced yet. */
964 if (!paLSPages[iPage].fDirty)
965 continue;
966 if (paLSPages[iPage].cUnchangedScans < 3)
967 continue;
968 if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
969 continue;
970
971 /* Save it. */
972 bool const fZero = paLSPages[iPage].fZero;
973 uint8_t abPage[PAGE_SIZE];
974 if (!fZero)
975 {
976 memcpy(abPage, pbPage, PAGE_SIZE);
977 RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
978 }
979
980 uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
981 if (iPage != 0 && iPage == iPageLast + 1)
982 rc = SSMR3PutU8(pSSM, u8Type);
983 else
984 {
985 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
986 SSMR3PutU8(pSSM, pMmio2->idSavedState);
987 rc = SSMR3PutU32(pSSM, iPage);
988 }
989 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
990 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
991 if (RT_FAILURE(rc))
992 break;
993
994 /* Housekeeping. */
995 paLSPages[iPage].fDirty = false;
996 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
997 pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
998 if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
999 pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
1000 pVM->pgm.s.LiveSave.cSavedPages++;
1001 iPageLast = iPage;
1002 }
1003
1004 pgmLock(pVM);
1005 }
1006 pgmUnlock(pVM);
1007 }
1008
1009 return rc;
1010}
1011
1012
1013/**
1014 * Cleans up MMIO2 pages after a live save.
1015 *
1016 * @param pVM The VM handle.
1017 */
1018static void pgmR3DoneMmio2Pages(PVM pVM)
1019{
1020 /*
1021 * Free the tracking structures for the MMIO2 pages.
1022 * We do the freeing outside the lock in case the VM is running.
1023 */
1024 pgmLock(pVM);
1025 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
1026 {
1027 void *pvMmio2ToFree = pMmio2->paLSPages;
1028 if (pvMmio2ToFree)
1029 {
1030 pMmio2->paLSPages = NULL;
1031 pgmUnlock(pVM);
1032 MMR3HeapFree(pvMmio2ToFree);
1033 pgmLock(pVM);
1034 }
1035 }
1036 pgmUnlock(pVM);
1037}
1038
1039
1040/**
1041 * Prepares the RAM pages for a live save.
1042 *
1043 * @returns VBox status code.
1044 * @param pVM The VM handle.
1045 */
1046static int pgmR3PrepRamPages(PVM pVM)
1047{
1048
1049 /*
1050 * Try allocating tracking structures for the ram ranges.
1051 *
1052 * To avoid lock contention, we leave the lock every time we're allocating
1053 * a new array. This means we'll have to ditch the allocation and start
1054 * all over again if the RAM range list changes in-between.
1055 *
1056 * Note! pgmR3SaveDone will always be called and it is therefore responsible
1057 * for cleaning up.
1058 */
1059 PPGMRAMRANGE pCur;
1060 pgmLock(pVM);
1061 do
1062 {
1063 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1064 {
1065 if ( !pCur->paLSPages
1066 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1067 {
1068 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1069 uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
1070 pgmUnlock(pVM);
1071 PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
1072 if (!paLSPages)
1073 return VERR_NO_MEMORY;
1074 pgmLock(pVM);
1075 if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1076 {
1077 pgmUnlock(pVM);
1078 MMR3HeapFree(paLSPages);
1079 pgmLock(pVM);
1080 break; /* try again */
1081 }
1082 pCur->paLSPages = paLSPages;
1083
1084 /*
1085 * Initialize the array.
1086 */
1087 uint32_t iPage = cPages;
1088 while (iPage-- > 0)
1089 {
1090 /** @todo yield critsect! (after moving this away from EMT0) */
1091 PCPGMPAGE pPage = &pCur->aPages[iPage];
1092 paLSPages[iPage].cDirtied = 0;
1093 paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
1094 paLSPages[iPage].fWriteMonitored = 0;
1095 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1096 paLSPages[iPage].u2Reserved = 0;
1097 switch (PGM_PAGE_GET_TYPE(pPage))
1098 {
1099 case PGMPAGETYPE_RAM:
1100 if ( PGM_PAGE_IS_ZERO(pPage)
1101 || PGM_PAGE_IS_BALLOONED(pPage))
1102 {
1103 paLSPages[iPage].fZero = 1;
1104 paLSPages[iPage].fShared = 0;
1105#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1106 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1107#endif
1108 }
1109 else if (PGM_PAGE_IS_SHARED(pPage))
1110 {
1111 paLSPages[iPage].fZero = 0;
1112 paLSPages[iPage].fShared = 1;
1113#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1114 paLSPages[iPage].u32Crc = UINT32_MAX;
1115#endif
1116 }
1117 else
1118 {
1119 paLSPages[iPage].fZero = 0;
1120 paLSPages[iPage].fShared = 0;
1121#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1122 paLSPages[iPage].u32Crc = UINT32_MAX;
1123#endif
1124 }
1125 paLSPages[iPage].fIgnore = 0;
1126 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1127 break;
1128
1129 case PGMPAGETYPE_ROM_SHADOW:
1130 case PGMPAGETYPE_ROM:
1131 {
1132 paLSPages[iPage].fZero = 0;
1133 paLSPages[iPage].fShared = 0;
1134 paLSPages[iPage].fDirty = 0;
1135 paLSPages[iPage].fIgnore = 1;
1136#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1137 paLSPages[iPage].u32Crc = UINT32_MAX;
1138#endif
1139 pVM->pgm.s.LiveSave.cIgnoredPages++;
1140 break;
1141 }
1142
1143 default:
 1144 AssertMsgFailed(("%R[pgmpage]", pPage)); /* deliberately falls through */
1145 case PGMPAGETYPE_MMIO2:
1146 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1147 paLSPages[iPage].fZero = 0;
1148 paLSPages[iPage].fShared = 0;
1149 paLSPages[iPage].fDirty = 0;
1150 paLSPages[iPage].fIgnore = 1;
1151#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1152 paLSPages[iPage].u32Crc = UINT32_MAX;
1153#endif
1154 pVM->pgm.s.LiveSave.cIgnoredPages++;
1155 break;
1156
1157 case PGMPAGETYPE_MMIO:
1158 paLSPages[iPage].fZero = 0;
1159 paLSPages[iPage].fShared = 0;
1160 paLSPages[iPage].fDirty = 0;
1161 paLSPages[iPage].fIgnore = 1;
1162#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1163 paLSPages[iPage].u32Crc = UINT32_MAX;
1164#endif
1165 pVM->pgm.s.LiveSave.cIgnoredPages++;
1166 break;
1167 }
1168 }
1169 }
1170 }
1171 } while (pCur);
1172 pgmUnlock(pVM);
1173
1174 return VINF_SUCCESS;
1175}
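/*
 * Editorial sketch, not part of the original file: the allocate-outside-the-
 * lock pattern used above, reduced to its essence. pgmLock/pgmUnlock,
 * idRamRangesGen and MMR3HeapAllocZ are the real APIs; the loop shape is
 * illustrative and elides the per-page initialization.
 */
#if 0 /* illustration only */
    for (;;)
    {
        pgmLock(pVM);
        uint32_t const idGen  = pVM->pgm.s.idRamRangesGen;  /* snapshot the list generation */
        PPGMRAMRANGE   pRange = NULL;                       /* ... find a range lacking paLSPages ... */
        if (!pRange)
        {
            pgmUnlock(pVM);
            break;                                          /* every range has its array */
        }
        size_t const cb = (size_t)(pRange->cb >> PAGE_SHIFT) * sizeof(PGMLIVESAVERAMPAGE);
        pgmUnlock(pVM);                                     /* never allocate while holding the lock */

        void *pv = MMR3HeapAllocZ(pVM, MM_TAG_PGM, cb);
        pgmLock(pVM);
        if (pVM->pgm.s.idRamRangesGen == idGen)
        {
            pRange->paLSPages = (PPGMLIVESAVERAMPAGE)pv;    /* list unchanged: safe to attach */
            pgmUnlock(pVM);
        }
        else
        {
            pgmUnlock(pVM);
            MMR3HeapFree(pv);                               /* list changed: pRange may be stale, retry */
        }
    }
#endif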
1176
1177
1178/**
1179 * Saves the RAM configuration.
1180 *
1181 * @returns VBox status code.
1182 * @param pVM The VM handle.
1183 * @param pSSM The saved state handle.
1184 */
1185static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
1186{
1187 uint32_t cbRamHole = 0;
1188 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
1189 AssertRCReturn(rc, rc);
1190
1191 uint64_t cbRam = 0;
1192 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
1193 AssertRCReturn(rc, rc);
1194
1195 SSMR3PutU32(pSSM, cbRamHole);
1196 return SSMR3PutU64(pSSM, cbRam);
1197}
1198
1199
1200/**
1201 * Loads and verifies the RAM configuration.
1202 *
1203 * @returns VBox status code.
1204 * @param pVM The VM handle.
1205 * @param pSSM The saved state handle.
1206 */
1207static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
1208{
1209 uint32_t cbRamHoleCfg = 0;
1210 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
1211 AssertRCReturn(rc, rc);
1212
1213 uint64_t cbRamCfg = 0;
1214 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
1215 AssertRCReturn(rc, rc);
1216
1217 uint32_t cbRamHoleSaved;
1218 SSMR3GetU32(pSSM, &cbRamHoleSaved);
1219
1220 uint64_t cbRamSaved;
1221 rc = SSMR3GetU64(pSSM, &cbRamSaved);
1222 AssertRCReturn(rc, rc);
1223
1224 if ( cbRamHoleCfg != cbRamHoleSaved
1225 || cbRamCfg != cbRamSaved)
1226 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
1227 cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
1228 return VINF_SUCCESS;
1229}
1230
1231#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1232
1233/**
1234 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1235 * info with it.
1236 *
1237 * @param pVM The VM handle.
1238 * @param pCur The current RAM range.
1239 * @param paLSPages The current array of live save page tracking
1240 * structures.
1241 * @param iPage The page index.
1242 */
1243static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1244{
1245 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1246 PGMPAGEMAPLOCK PgMpLck;
1247 void const *pvPage;
1248 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1249 if (RT_SUCCESS(rc))
1250 {
1251 paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1252 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1253 }
1254 else
1255 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1256}
1257
1258
1259/**
 1260 * Verifies the CRC-32 for a page given its raw bits.
1261 *
1262 * @param pvPage The page bits.
1263 * @param pCur The current RAM range.
1264 * @param paLSPages The current array of live save page tracking
1265 * structures.
 1266 * @param iPage The page index.
 1267 * @param pszWhere Caller identification for the assertion message. */
1268static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1269{
1270 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1271 {
1272 uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1273 Assert( ( !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
1274 && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
1275 || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
1276 AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
1277 ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
1278 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
1279 }
1280}
1281
1282
1283/**
1284 * Verifies the CRC-32 for a RAM page.
1285 *
1286 * @param pVM The VM handle.
1287 * @param pCur The current RAM range.
1288 * @param paLSPages The current array of live save page tracking
1289 * structures.
 1290 * @param iPage The page index.
 1291 * @param pszWhere Caller identification for the assertion message. */
1292static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1293{
1294 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1295 {
1296 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1297 PGMPAGEMAPLOCK PgMpLck;
1298 void const *pvPage;
1299 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1300 if (RT_SUCCESS(rc))
1301 {
1302 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
1303 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1304 }
1305 }
1306}
1307
1308#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1309
1310/**
1311 * Scan for RAM page modifications and reprotect them.
1312 *
1313 * @param pVM The VM handle.
1314 * @param fFinalPass Whether this is the final pass or not.
1315 */
1316static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
1317{
1318 /*
1319 * The RAM.
1320 */
1321 RTGCPHYS GCPhysCur = 0;
1322 PPGMRAMRANGE pCur;
1323 pgmLock(pVM);
1324 do
1325 {
1326 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1327 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1328 {
1329 if ( pCur->GCPhysLast > GCPhysCur
1330 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1331 {
1332 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1333 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1334 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1335 GCPhysCur = 0;
1336 for (; iPage < cPages; iPage++)
1337 {
1338 /* Do yield first. */
1339 if ( !fFinalPass
1340#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
1341 && (iPage & 0x7ff) == 0x100
1342#endif
1343 && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
1344 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1345 {
1346 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1347 break; /* restart */
1348 }
1349
1350 /* Skip already ignored pages. */
1351 if (paLSPages[iPage].fIgnore)
1352 continue;
1353
1354 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1355 {
1356 /*
1357 * A RAM page.
1358 */
1359 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1360 {
1361 case PGM_PAGE_STATE_ALLOCATED:
1362 /** @todo Optimize this: Don't always re-enable write
1363 * monitoring if the page is known to be very busy. */
1364 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1365 {
1366 Assert(paLSPages[iPage].fWriteMonitored);
1367 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1368 Assert(pVM->pgm.s.cWrittenToPages > 0);
1369 pVM->pgm.s.cWrittenToPages--;
1370 }
1371 else
1372 {
1373 Assert(!paLSPages[iPage].fWriteMonitored);
1374 pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
1375 }
1376
1377 if (!paLSPages[iPage].fDirty)
1378 {
1379 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1380 if (paLSPages[iPage].fZero)
1381 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1382 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1383 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1384 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1385 }
1386
1387 pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
1388 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1389 paLSPages[iPage].fWriteMonitored = 1;
1390 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1391 paLSPages[iPage].fDirty = 1;
1392 paLSPages[iPage].fZero = 0;
1393 paLSPages[iPage].fShared = 0;
1394#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1395 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1396#endif
1397 break;
1398
1399 case PGM_PAGE_STATE_WRITE_MONITORED:
1400 Assert(paLSPages[iPage].fWriteMonitored);
1401 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1402 {
1403#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1404 if (paLSPages[iPage].fWriteMonitoredJustNow)
1405 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1406 else
1407 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
1408#endif
1409 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1410 }
1411 else
1412 {
1413 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1414#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1415 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1416#endif
1417 if (!paLSPages[iPage].fDirty)
1418 {
1419 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1420 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1421 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1422 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1423 }
1424 }
1425 break;
1426
1427 case PGM_PAGE_STATE_ZERO:
1428 if (!paLSPages[iPage].fZero)
1429 {
1430 if (!paLSPages[iPage].fDirty)
1431 {
1432 paLSPages[iPage].fDirty = 1;
1433 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1434 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1435 }
1436 paLSPages[iPage].fZero = 1;
1437 paLSPages[iPage].fShared = 0;
1438#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1439 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1440#endif
1441 }
1442 break;
1443
1444 case PGM_PAGE_STATE_BALLOONED:
1445 if (!paLSPages[iPage].fZero)
1446 {
1447 if (!paLSPages[iPage].fDirty)
1448 {
1449 paLSPages[iPage].fDirty = 1;
1450 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1451 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1452 }
1453 paLSPages[iPage].fZero = 1;
1454 paLSPages[iPage].fShared = 0;
1455#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1456 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1457#endif
1458 }
1459 break;
1460
1461 case PGM_PAGE_STATE_SHARED:
1462 if (!paLSPages[iPage].fShared)
1463 {
1464 if (!paLSPages[iPage].fDirty)
1465 {
1466 paLSPages[iPage].fDirty = 1;
1467 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1468 if (paLSPages[iPage].fZero)
1469 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1470 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1471 }
1472 paLSPages[iPage].fZero = 0;
1473 paLSPages[iPage].fShared = 1;
1474#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1475 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1476#endif
1477 }
1478 break;
1479 }
1480 }
1481 else
1482 {
1483 /*
1484 * All other types => Ignore the page.
1485 */
1486 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1487 paLSPages[iPage].fIgnore = 1;
1488 if (paLSPages[iPage].fWriteMonitored)
1489 {
1490 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1491 * pages! */
1492 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1493 {
1494 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1495 PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1496 Assert(pVM->pgm.s.cMonitoredPages > 0);
1497 pVM->pgm.s.cMonitoredPages--;
1498 }
1499 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1500 {
1501 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1502 Assert(pVM->pgm.s.cWrittenToPages > 0);
1503 pVM->pgm.s.cWrittenToPages--;
1504 }
1505 pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
1506 }
1507
1508 /** @todo the counting doesn't quite work out here. fix later? */
1509 if (paLSPages[iPage].fDirty)
1510 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1511 else
1512 {
1513 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1514 if (paLSPages[iPage].fZero)
1515 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1516 }
1517 pVM->pgm.s.LiveSave.cIgnoredPages++;
1518 }
1519 } /* for each page in range */
1520
1521 if (GCPhysCur != 0)
1522 break; /* Yield + ramrange change */
1523 GCPhysCur = pCur->GCPhysLast;
1524 }
1525 } /* for each range */
1526 } while (pCur);
1527 pgmUnlock(pVM);
1528}
1529
1530
1531/**
1532 * Save quiescent RAM pages.
1533 *
1534 * @returns VBox status code.
1535 * @param pVM The VM handle.
1536 * @param pSSM The SSM handle.
1537 * @param fLiveSave Whether it's a live save or not.
1538 * @param uPass The pass number.
1539 */
1540static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1541{
1542 /*
1543 * The RAM.
1544 */
1545 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1546 RTGCPHYS GCPhysCur = 0;
1547 PPGMRAMRANGE pCur;
1548 bool fFTMDeltaSaveActive = FTMIsDeltaLoadSaveActive(pVM);
1549
1550 pgmLock(pVM);
1551 do
1552 {
1553 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1554 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1555 {
1556 if ( pCur->GCPhysLast > GCPhysCur
1557 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1558 {
1559 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1560 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1561 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1562 GCPhysCur = 0;
1563 for (; iPage < cPages; iPage++)
1564 {
1565 /* Do yield first. */
1566 if ( uPass != SSM_PASS_FINAL
1567 && (iPage & 0x7ff) == 0x100
1568 && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
1569 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1570 {
1571 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1572 break; /* restart */
1573 }
1574
1575 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1576
1577 /*
1578 * Only save pages that haven't changed since last scan and are dirty.
1579 */
1580 if ( uPass != SSM_PASS_FINAL
1581 && paLSPages)
1582 {
1583 if (!paLSPages[iPage].fDirty)
1584 continue;
1585 if (paLSPages[iPage].fWriteMonitoredJustNow)
1586 continue;
1587 if (paLSPages[iPage].fIgnore)
1588 continue;
1589 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1590 continue;
1591 if ( PGM_PAGE_GET_STATE(pCurPage)
1592 != ( paLSPages[iPage].fZero
1593 ? PGM_PAGE_STATE_ZERO
1594 : paLSPages[iPage].fShared
1595 ? PGM_PAGE_STATE_SHARED
1596 : PGM_PAGE_STATE_WRITE_MONITORED))
1597 continue;
1598 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1599 continue;
1600 }
1601 else
1602 {
1603 if ( paLSPages
1604 && !paLSPages[iPage].fDirty
1605 && !paLSPages[iPage].fIgnore)
1606 {
1607#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1608 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1609 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1610#endif
1611 continue;
1612 }
1613 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1614 continue;
1615 }
1616
1617 /*
1618 * Do the saving outside the PGM critsect since SSM may block on I/O.
1619 */
1620 int rc;
1621 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1622 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1623 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1624 bool fSkipped = false;
1625
1626 if (!fZero && !fBallooned)
1627 {
1628 /*
1629 * Copy the page and then save it outside the lock (since any
1630 * SSM call may block).
1631 */
1632 uint8_t abPage[PAGE_SIZE];
1633 PGMPAGEMAPLOCK PgMpLck;
1634 void const *pvPage;
1635 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1636 if (RT_SUCCESS(rc))
1637 {
1638 memcpy(abPage, pvPage, PAGE_SIZE);
1639#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1640 if (paLSPages)
1641 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1642#endif
1643 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1644 }
1645 pgmUnlock(pVM);
1646 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1647
 1648 /* Try to save some memory when restoring. (Check the local copy; the pvPage mapping was released above.) */
 1649 if (!ASMMemIsZeroPage(abPage))
1650 {
1651 if (fFTMDeltaSaveActive)
1652 {
1653 if ( PGM_PAGE_IS_WRITTEN_TO(pCurPage)
1654 || PGM_PAGE_IS_FT_DIRTY(pCurPage))
1655 {
1656 if (GCPhys == GCPhysLast + PAGE_SIZE)
1657 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1658 else
1659 {
1660 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1661 SSMR3PutGCPhys(pSSM, GCPhys);
1662 }
1663 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
1664 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pCurPage);
1665 PGM_PAGE_CLEAR_FT_DIRTY(pCurPage);
1666 }
1667 /* else nothing changed, so skip it. */
1668 else
1669 fSkipped = true;
1670 }
1671 else
1672 {
1673 if (GCPhys == GCPhysLast + PAGE_SIZE)
1674 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1675 else
1676 {
1677 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1678 SSMR3PutGCPhys(pSSM, GCPhys);
1679 }
1680 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
1681 }
1682 }
1683 else
1684 {
1685 if (GCPhys == GCPhysLast + PAGE_SIZE)
1686 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1687 else
1688 {
1689 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1690 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1691 }
1692 }
1693 }
1694 else
1695 {
1696 /*
1697 * Dirty zero or ballooned page.
1698 */
1699#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1700 if (paLSPages)
1701 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1702#endif
1703 pgmUnlock(pVM);
1704
1705 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1706 if (GCPhys == GCPhysLast + PAGE_SIZE)
1707 rc = SSMR3PutU8(pSSM, u8RecType);
1708 else
1709 {
1710 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1711 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1712 }
1713 }
1714 if (RT_FAILURE(rc))
1715 return rc;
1716
1717 pgmLock(pVM);
1718 if (!fSkipped)
1719 GCPhysLast = GCPhys;
1720 if (paLSPages)
1721 {
1722 paLSPages[iPage].fDirty = 0;
1723 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1724 if (fZero)
1725 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1726 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1727 pVM->pgm.s.LiveSave.cSavedPages++;
1728 }
1729 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1730 {
1731 GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
1732 break; /* restart */
1733 }
1734
1735 } /* for each page in range */
1736
1737 if (GCPhysCur != 0)
1738 break; /* Yield + ramrange change */
1739 GCPhysCur = pCur->GCPhysLast;
1740 }
1741 } /* for each range */
1742 } while (pCur);
1743
1744 pgmUnlock(pVM);
1745
1746 return VINF_SUCCESS;
1747}
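/*
 * Editorial note, not part of the original file: the GCPhysLast bookkeeping
 * above (and the iPrevPage/iPageLast equivalents in the ROM and MMIO2 savers)
 * implements a simple run-length optimization: a record for the page right
 * after the previously saved one omits the PGM_STATE_REC_FLAG_ADDR prefix, so
 * long runs of consecutive pages cost a single framing byte each. Pages
 * skipped by the FT delta path intentionally leave GCPhysLast untouched,
 * forcing the next record to carry an explicit address again.
 */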
1748
1749
1750/**
1751 * Cleans up RAM pages after a live save.
1752 *
1753 * @param pVM The VM handle.
1754 */
1755static void pgmR3DoneRamPages(PVM pVM)
1756{
1757 /*
1758 * Free the tracking arrays and disable write monitoring.
1759 *
1760 * Play nice with the PGM lock in case we're called while the VM is still
1761 * running. This means we have to delay the freeing since we wish to use
 1762 * paLSPages as an indicator of which RAM ranges we need to scan for
1763 * write monitored pages.
1764 */
1765 void *pvToFree = NULL;
1766 PPGMRAMRANGE pCur;
1767 uint32_t cMonitoredPages = 0;
1768 pgmLock(pVM);
1769 do
1770 {
1771 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1772 {
1773 if (pCur->paLSPages)
1774 {
1775 if (pvToFree)
1776 {
1777 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1778 pgmUnlock(pVM);
1779 MMR3HeapFree(pvToFree);
1780 pvToFree = NULL;
1781 pgmLock(pVM);
1782 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1783 break; /* start over again. */
1784 }
1785
1786 pvToFree = pCur->paLSPages;
1787 pCur->paLSPages = NULL;
1788
1789 uint32_t iPage = pCur->cb >> PAGE_SHIFT;
1790 while (iPage--)
1791 {
1792 PPGMPAGE pPage = &pCur->aPages[iPage];
1793 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1794 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1795 {
1796 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1797 cMonitoredPages++;
1798 }
1799 }
1800 }
1801 }
1802 } while (pCur);
1803
1804 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1805 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1806 pVM->pgm.s.cMonitoredPages = 0;
1807 else
1808 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1809
1810 pgmUnlock(pVM);
1811
1812 MMR3HeapFree(pvToFree);
1813 pvToFree = NULL;
1814}
1815
1816
1817/**
1818 * Execute a live save pass.
1819 *
1820 * @returns VBox status code.
1821 *
1822 * @param pVM The VM handle.
 1823 * @param pSSM The SSM handle.
 1824 * @param uPass The data pass number. */
1825static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1826{
1827 int rc;
1828
1829 /*
1830 * Save the MMIO2 and ROM range IDs in pass 0.
1831 */
1832 if (uPass == 0)
1833 {
1834 rc = pgmR3SaveRamConfig(pVM, pSSM);
1835 if (RT_FAILURE(rc))
1836 return rc;
1837 rc = pgmR3SaveRomRanges(pVM, pSSM);
1838 if (RT_FAILURE(rc))
1839 return rc;
1840 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1841 if (RT_FAILURE(rc))
1842 return rc;
1843 }
1844 /*
 1845 * Reset the pages-per-second estimate to avoid inflation by the initial
1846 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1847 */
1848 else if (uPass == 7)
1849 {
1850 pVM->pgm.s.LiveSave.cSavedPages = 0;
1851 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1852 }
1853
1854 /*
1855 * Do the scanning.
1856 */
1857 pgmR3ScanRomPages(pVM);
1858 pgmR3ScanMmio2Pages(pVM, uPass);
1859 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1860 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1861
1862 /*
1863 * Save the pages.
1864 */
1865 if (uPass == 0)
1866 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1867 else
1868 rc = VINF_SUCCESS;
1869 if (RT_SUCCESS(rc))
1870 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1871 if (RT_SUCCESS(rc))
1872 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1873 if (RT_SUCCESS(rc))
1874 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1875 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1876
1877 return rc;
1878}
1879
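/*
 * For orientation, a live save therefore emits a stream shaped roughly like
 * this (sketch based on the code above; record framing omitted):
 *
 *      pass 0:    RAM config + ROM ranges + MMIO2 ranges + virgin ROM pages
 *                 + dirty shadowed-ROM/MMIO2/RAM pages + PGM_STATE_REC_END
 *      pass 1..N: dirty shadowed-ROM/MMIO2/RAM pages + PGM_STATE_REC_END
 *                 (pass 7 also resets the pages-per-second estimate)
 *      final:     written by pgmR3SaveExec once pgmR3LiveVote approves.
 */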
1880
1881/**
1882 * Votes on whether the live save phase is done or not.
1883 *
1884 * @returns VBox status code.
1885 *
1886 * @param pVM The VM handle.
1887 * @param pSSM The SSM handle.
1888 * @param uPass The data pass.
1889 */
1890static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1891{
1892 /*
1893 * Update and calculate parameters used in the decision making.
1894 */
1895 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1896
1897 /* update history. */
1898 pgmLock(pVM);
1899 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1900 pgmUnlock(pVM);
1901 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1902 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1903 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1904 + cWrittenToPages;
1905 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1906 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1907 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1908
1909 /* calc short-term average (4 passes). */
1910 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1911 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1912 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1913 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1914 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1915 uint32_t const cDirtyPagesShort = cTotal / 4;
1916 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1917
1918 /* calc long-term average. */
1919 cTotal = 0;
1920 if (uPass < cHistoryEntries)
1921 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1922 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1923 else
1924 for (i = 0; i < cHistoryEntries; i++)
1925 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1926 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
1927 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
1928
1929 /* estimate the speed */
1930 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
1931 uint32_t cPagesPerSecond = (uint32_t)( pVM->pgm.s.LiveSave.cSavedPages
1932 / ((long double)cNsElapsed / 1000000000.0) );
1933 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
1934
1935 /*
1936 * Try to make a decision.
1937 */
1938 if ( cDirtyPagesShort <= cDirtyPagesLong
1939 && ( cDirtyNow <= cDirtyPagesShort
1940 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
1941 )
1942 )
1943 {
1944 if (uPass > 10)
1945 {
1946 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
1947 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
1948 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
1949 if (cMsMaxDowntime < 32)
1950 cMsMaxDowntime = 32;
1951 if ( ( cMsLeftLong <= cMsMaxDowntime
1952 && cMsLeftShort < cMsMaxDowntime)
1953 || cMsLeftShort < cMsMaxDowntime / 2
1954 )
1955 {
1956 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
1957 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
1958 return VINF_SUCCESS;
1959 }
1960 }
1961 else
1962 {
1963 if ( ( cDirtyPagesShort <= 128
1964 && cDirtyPagesLong <= 1024)
1965 || cDirtyPagesLong <= 256
1966 )
1967 {
1968 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
1969 return VINF_SUCCESS;
1970 }
1971 }
1972 }
1973
1974 /*
1975 * Come up with a completion percentage. Currently this is a simple
1976 * dirty page (long term) vs. total pages ratio + some pass trickery.
1977 */
1978 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong
1979 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) * 100.0);
1980 if (uPctDirty <= 100)
1981 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
1982 else
1983 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
1984 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
1985
1986 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1987}
1988
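/*
 * Worked example with illustrative numbers: if the last four history entries
 * are 400, 380, 360 and 348 dirty pages, then cDirtyPagesShort is
 * (400 + 380 + 360 + 348) / 4 = 372.  At the initial estimate of 8192
 * pages/s that is 372 / 8192 * 1000 ~= 45 ms.  Assuming the configured
 * maximum downtime is, say, 250 ms (the code above only guarantees it is at
 * least 32 ms), both cMsLeftShort and cMsLeftLong are well below the limit,
 * so once uPass > 10 the vote returns VINF_SUCCESS.
 */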
1989
1990/**
1991 * Prepare for a live save operation.
1992 *
1993 * This will attempt to allocate and initialize the tracking structures. It
1994 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1995 * pgmR3SaveDone will do the cleanups.
1996 *
1997 * @returns VBox status code.
1998 *
1999 * @param pVM The VM handle.
2000 * @param pSSM The SSM handle.
2001 */
2002static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
2003{
2004 /*
2005 * Indicate that we will be using the write monitoring.
2006 */
2007 pgmLock(pVM);
2008 /** @todo find a way of mediating this when more users are added. */
2009 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
2010 {
2011 pgmUnlock(pVM);
2012 AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
2013 }
2014 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
2015 pgmUnlock(pVM);
2016
2017 /*
2018 * Initialize the statistics.
2019 */
2020 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2021 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2022 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2023 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2024 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2025 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2026 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2027 pVM->pgm.s.LiveSave.fActive = true;
2028 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2029 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2030 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2031 pVM->pgm.s.LiveSave.cSavedPages = 0;
2032 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2033 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2034
2035 /*
2036 * Per page type.
2037 */
2038 int rc = pgmR3PrepRomPages(pVM);
2039 if (RT_SUCCESS(rc))
2040 rc = pgmR3PrepMmio2Pages(pVM);
2041 if (RT_SUCCESS(rc))
2042 rc = pgmR3PrepRamPages(pVM);
2043 return rc;
2044}
2045
2046
2047/**
2048 * Execute state save operation.
2049 *
2050 * @returns VBox status code.
2051 * @param pVM VM Handle.
2052 * @param pSSM SSM operation handle.
2053 */
2054static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2055{
2056 int rc = VINF_SUCCESS;
2057 PPGM pPGM = &pVM->pgm.s;
2058
2059 /*
2060 * Lock PGM and set the no-more-writes indicator.
2061 */
2062 pgmLock(pVM);
2063 pVM->pgm.s.fNoMorePhysWrites = true;
2064
2065 /*
2066 * Save basic data (required / unaffected by relocation).
2067 */
2068 bool const fMappingsFixed = pVM->pgm.s.fMappingsFixed;
2069 pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;
2070 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
2071 pVM->pgm.s.fMappingsFixed = fMappingsFixed;
2072
2073 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2074 rc = SSMR3PutStruct(pSSM, &pVM->aCpus[idCpu].pgm.s, &s_aPGMCpuFields[0]);
2075
2076 /*
2077 * Save the (remainder of the) memory.
2078 */
2079 if (RT_SUCCESS(rc))
2080 {
2081 if (pVM->pgm.s.LiveSave.fActive)
2082 {
2083 pgmR3ScanRomPages(pVM);
2084 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2085 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2086
2087 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2088 if (RT_SUCCESS(rc))
2089 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2090 if (RT_SUCCESS(rc))
2091 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2092 }
2093 else
2094 {
2095 rc = pgmR3SaveRamConfig(pVM, pSSM);
2096 if (RT_SUCCESS(rc))
2097 rc = pgmR3SaveRomRanges(pVM, pSSM);
2098 if (RT_SUCCESS(rc))
2099 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2100 if (RT_SUCCESS(rc))
2101 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2102 if (RT_SUCCESS(rc))
2103 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2104 if (RT_SUCCESS(rc))
2105 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2106 if (RT_SUCCESS(rc))
2107 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2108 }
2109 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
2110 }
2111
2112 pgmUnlock(pVM);
2113 return rc;
2114}
2115
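/*
 * Note how the two branches above differ: in a live save the RAM config, the
 * ROM/MMIO2 range lists and the virgin ROM pages were already written in
 * pass 0 by pgmR3LiveExec, so the final pass only flushes what is still
 * dirty; a non-live save has to emit everything here in one go.
 */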
2116
2117/**
2118 * Cleans up after a save state operation.
2119 *
2120 * @returns VBox status code.
2121 * @param pVM VM Handle.
2122 * @param pSSM SSM operation handle.
2123 */
2124static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2125{
2126 /*
2127 * Do per page type cleanups first.
2128 */
2129 if (pVM->pgm.s.LiveSave.fActive)
2130 {
2131 pgmR3DoneRomPages(pVM);
2132 pgmR3DoneMmio2Pages(pVM);
2133 pgmR3DoneRamPages(pVM);
2134 }
2135
2136 /*
2137 * Clear the live save indicator and disengage write monitoring.
2138 */
2139 pgmLock(pVM);
2140 pVM->pgm.s.LiveSave.fActive = false;
2141 /** @todo this is blindly assuming that we're the only user of write
2142 * monitoring. Fix this when more users are added. */
2143 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2144 pgmUnlock(pVM);
2145
2146 return VINF_SUCCESS;
2147}
2148
2149
2150/**
2151 * Prepare state load operation.
2152 *
2153 * @returns VBox status code.
2154 * @param pVM VM Handle.
2155 * @param pSSM SSM operation handle.
2156 */
2157static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2158{
2159 /*
2160 * Call the reset function to make sure all the memory is cleared.
2161 */
2162 PGMR3Reset(pVM);
2163 pVM->pgm.s.LiveSave.fActive = false;
2164 NOREF(pSSM);
2165 return VINF_SUCCESS;
2166}
2167
2168
2169/**
2170 * Load an ignored page.
2171 *
2172 * @returns VBox status code.
2173 * @param pSSM The saved state handle.
2174 */
2175static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2176{
2177 uint8_t abPage[PAGE_SIZE];
2178 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2179}
2180
2181
2182/**
2183 * Loads a page without any bits in the saved state, i.e. making sure it's
2184 * really zero.
2185 *
2186 * @returns VBox status code.
2187 * @param pVM The VM handle.
2188 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
2189 * state).
2190 * @param pPage The guest page tracking structure.
2191 * @param GCPhys The page address.
2192 * @param pRam The ram range (logging).
2193 */
2194static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2195{
2196 if ( PGM_PAGE_GET_TYPE(pPage) != uType
2197 && uType != PGMPAGETYPE_INVALID)
2198 return VERR_SSM_UNEXPECTED_DATA;
2199
2200 /* I think this should be sufficient. */
2201 if ( !PGM_PAGE_IS_ZERO(pPage)
2202 && !PGM_PAGE_IS_BALLOONED(pPage))
2203 return VERR_SSM_UNEXPECTED_DATA;
2204
2205 NOREF(pVM);
2206 NOREF(GCPhys);
2207 NOREF(pRam);
2208 return VINF_SUCCESS;
2209}
2210
2211
2212/**
2213 * Loads a page from the saved state.
2214 *
2215 * @returns VBox status code.
2216 * @param pVM The VM handle.
2217 * @param pSSM The SSM handle.
2218 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
2219 * state).
2220 * @param pPage The guest page tracking structure.
2221 * @param GCPhys The page address.
2222 * @param pRam The ram range (logging).
2223 */
2224static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2225{
2226 /*
2227 * Match up the type, dealing with MMIO2 aliases (dropped).
2228 */
2229 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == uType
2230 || uType == PGMPAGETYPE_INVALID
2231 /* kludge for the expanded PXE BIOS (r67885) - #5687: */
2232 || ( uType == PGMPAGETYPE_RAM
2233 && GCPhys >= 0xed000
2234 && GCPhys <= 0xeffff
2235 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2236 ,
2237 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2238 VERR_SSM_UNEXPECTED_DATA);
2239
2240 /*
2241 * Load the page.
2242 */
2243 PGMPAGEMAPLOCK PgMpLck;
2244 void *pvPage;
2245 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2246 if (RT_SUCCESS(rc))
2247 {
2248 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
2249 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2250 }
2251
2252 return rc;
2253}
2254
2255
2256/**
2257 * Loads a page (counterpart to pgmR3SavePage).
2258 *
2259 * @returns VBox status code, fully bitched errors.
2260 * @param pVM The VM handle.
2261 * @param pSSM The SSM handle.
2262 * @param uType The page type.
2263 * @param pPage The page.
2264 * @param GCPhys The page address.
2265 * @param pRam The RAM range (for error messages).
2266 */
2267static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2268{
2269 uint8_t uState;
2270 int rc = SSMR3GetU8(pSSM, &uState);
2271 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2272 if (uState == 0 /* zero */)
2273 rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
2274 else if (uState == 1)
2275 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
2276 else
2277 rc = VERR_INTERNAL_ERROR;
2278 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
2279 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
2280 rc);
2281 return VINF_SUCCESS;
2282}
2283
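/*
 * The old per-page record decoded above is simply:
 *
 *      uint8_t uState;             // 0 = zero page, 1 = page with bits
 *      uint8_t abBits[PAGE_SIZE];  // present only when uState == 1
 *
 * as handled by pgmR3LoadPageZeroOld and pgmR3LoadPageBitsOld.
 */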
2284
2285/**
2286 * Loads a shadowed ROM page.
2287 *
2288 * @returns VBox status code, errors are fully bitched.
2289 * @param pVM The VM handle.
2290 * @param pSSM The saved state handle.
2291 * @param pPage The page.
2292 * @param GCPhys The page address.
2293 * @param pRam The RAM range (for error messages).
2294 */
2295static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2296{
2297 /*
2298 * Load and set the protection first, then load the two pages; the first
2299 * one is the active page, the other the passive one.
2300 */
2301 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2302 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);
2303
2304 uint8_t uProt;
2305 int rc = SSMR3GetU8(pSSM, &uProt);
2306 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2307 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2308 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2309 && enmProt < PGMROMPROT_END,
2310 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2311 VERR_SSM_UNEXPECTED_DATA);
2312
2313 if (pRomPage->enmProt != enmProt)
2314 {
2315 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2316 AssertLogRelRCReturn(rc, rc);
2317 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
2318 }
2319
2320 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2321 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2322 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2323 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2324
2325 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2326 * used down the line (the 2nd page will be written to the first
2327 * one because of a false TLB hit since the TLB is using GCPhys and
2328 * doesn't check the HCPhys of the desired page). */
2329 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2330 if (RT_SUCCESS(rc))
2331 {
2332 *pPageActive = *pPage;
2333 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2334 }
2335 return rc;
2336}
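
/*
 * A shadowed ROM page in the old format is thus stored as:
 *
 *      uint8_t uProt;              // PGMROMPROT value
 *      <page record>               // the active page (see pgmR3LoadPageOld)
 *      <page record>               // the passive page
 *
 * where PGMROMPROT_IS_ROM(enmProt) decides which of Virgin/Shadow is active.
 */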
2337
2338/**
2339 * Loads the RAM range flags and bits for older versions of the saved state.
2340 *
2341 * @returns VBox status code.
2342 *
2343 * @param pVM The VM handle
2344 * @param pSSM The SSM handle.
2345 * @param uVersion The saved state version.
2346 */
2347static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2348{
2349 PPGM pPGM = &pVM->pgm.s;
2350
2351 /*
2352 * Ram range flags and bits.
2353 */
2354 uint32_t i = 0;
2355 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
2356 {
2357 /* Check the sequence number / separator. */
2358 uint32_t u32Sep;
2359 int rc = SSMR3GetU32(pSSM, &u32Sep);
2360 if (RT_FAILURE(rc))
2361 return rc;
2362 if (u32Sep == ~0U)
2363 break;
2364 if (u32Sep != i)
2365 {
2366 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2367 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2368 }
2369 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2370
2371 /* Get the range details. */
2372 RTGCPHYS GCPhys;
2373 SSMR3GetGCPhys(pSSM, &GCPhys);
2374 RTGCPHYS GCPhysLast;
2375 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2376 RTGCPHYS cb;
2377 SSMR3GetGCPhys(pSSM, &cb);
2378 uint8_t fHaveBits;
2379 rc = SSMR3GetU8(pSSM, &fHaveBits);
2380 if (RT_FAILURE(rc))
2381 return rc;
2382 if (fHaveBits & ~1)
2383 {
2384 AssertMsgFailed(("fHaveBits=%#x\n", fHaveBits));
2385 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2386 }
2387 size_t cchDesc = 0;
2388 char szDesc[256];
2389 szDesc[0] = '\0';
2390 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2391 {
2392 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2393 if (RT_FAILURE(rc))
2394 return rc;
2395 /* Since we've modified the description strings in r45878, only compare
2396 them if the saved state is more recent. */
2397 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2398 cchDesc = strlen(szDesc);
2399 }
2400
2401 /*
2402 * Match it up with the current range.
2403 *
2404 * Note there is a hack for dealing with the high BIOS mapping
2405 * in the old saved state format; this means we might not have
2406 * a 1:1 match on success.
2407 */
2408 if ( ( GCPhys != pRam->GCPhys
2409 || GCPhysLast != pRam->GCPhysLast
2410 || cb != pRam->cb
2411 || ( cchDesc
2412 && strcmp(szDesc, pRam->pszDesc)) )
2413 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2414 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2415 || GCPhys != UINT32_C(0xfff80000)
2416 || GCPhysLast != UINT32_C(0xffffffff)
2417 || pRam->GCPhysLast != GCPhysLast
2418 || pRam->GCPhys < GCPhys
2419 || !fHaveBits)
2420 )
2421 {
2422 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2423 "State : %RGp-%RGp %RGp bytes %s %s\n",
2424 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2425 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2426 /*
2427 * If we're loading a state for debugging purpose, don't make a fuss if
2428 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2429 */
2430 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2431 || GCPhys < 8 * _1M)
2432 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2433 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2434 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2435 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
2436
2437 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2438 continue;
2439 }
2440
2441 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
2442 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2443 {
2444 /*
2445 * Load the pages one by one.
2446 */
2447 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2448 {
2449 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2450 PPGMPAGE pPage = &pRam->aPages[iPage];
2451 uint8_t uType;
2452 rc = SSMR3GetU8(pSSM, &uType);
2453 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2454 if (uType == PGMPAGETYPE_ROM_SHADOW)
2455 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2456 else
2457 rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
2458 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2459 }
2460 }
2461 else
2462 {
2463 /*
2464 * Old format.
2465 */
2466
2467 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2468 The rest is generally irrelevant and wrong since this stuff has to match the registrations. */
2469 uint32_t fFlags = 0;
2470 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2471 {
2472 uint16_t u16Flags;
2473 rc = SSMR3GetU16(pSSM, &u16Flags);
2474 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2475 fFlags |= u16Flags;
2476 }
2477
2478 /* Load the bits */
2479 if ( !fHaveBits
2480 && GCPhysLast < UINT32_C(0xe0000000))
2481 {
2482 /*
2483 * Dynamic chunks.
2484 */
2485 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
2486 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2487 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2488 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2489
2490 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2491 {
2492 uint8_t fPresent;
2493 rc = SSMR3GetU8(pSSM, &fPresent);
2494 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2495 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2496 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2497 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2498
2499 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2500 {
2501 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2502 PPGMPAGE pPage = &pRam->aPages[iPage];
2503 if (fPresent)
2504 {
2505 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
2506 rc = pgmR3LoadPageToDevNullOld(pSSM);
2507 else
2508 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2509 }
2510 else
2511 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2512 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2513 }
2514 }
2515 }
2516 else if (pRam->pvR3)
2517 {
2518 /*
2519 * MMIO2.
2520 */
2521 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2522 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2523 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2524 AssertLogRelMsgReturn(pRam->pvR3,
2525 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2526 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2527
2528 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2529 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2530 }
2531 else if (GCPhysLast < UINT32_C(0xfff80000))
2532 {
2533 /*
2534 * PCI MMIO, no pages saved.
2535 */
2536 }
2537 else
2538 {
2539 /*
2540 * Load the 0xfff80000..0xffffffff BIOS range.
2541 * It starts with X reserved pages that we have to skip over since
2542 * the RAMRANGE created by the new code won't include those.
2543 */
2544 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2545 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2546 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2547 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2548 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2549 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2550 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2551
2552 /* Skip wasted reserved pages before the ROM. */
2553 while (GCPhys < pRam->GCPhys)
2554 {
2555 rc = pgmR3LoadPageToDevNullOld(pSSM);
2556 GCPhys += PAGE_SIZE;
2557 }
2558
2559 /* Load the bios pages. */
2560 cPages = pRam->cb >> PAGE_SHIFT;
2561 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2562 {
2563 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2564 PPGMPAGE pPage = &pRam->aPages[iPage];
2565
2566 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2567 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage),
2568 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2569 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2570 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2571 }
2572 }
2573 }
2574 }
2575
2576 return VINF_SUCCESS;
2577}
2578
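/*
 * Summary of the old !fHaveBits RAM layout handled above, per 1 MB chunk
 * (see the "Dynamic chunks" block; assuming 4 KB pages, i.e. 256 per chunk):
 *
 *      uint8_t fPresent;                   // is this chunk present?
 *      uint8_t aPages[256][PAGE_SIZE];     // only when fPresent != 0
 *
 * Absent chunks carry no page data; their pages must already be zero.
 */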
2579
2580/**
2581 * Worker for pgmR3Load and pgmR3LoadLocked.
2582 *
2583 * @returns VBox status code.
2584 *
2585 * @param pVM The VM handle.
2586 * @param pSSM The SSM handle.
2587 * @param uVersion The PGM saved state unit version.
2588 * @param uPass The pass number.
2589 *
2590 * @todo This needs splitting up if more record types or code twists are
2591 * added...
2592 */
2593static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2594{
2595 /*
2596 * Process page records until we hit the terminator.
2597 */
2598 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2599 PPGMRAMRANGE pRamHint = NULL;
2600 uint8_t id = UINT8_MAX;
2601 uint32_t iPage = UINT32_MAX - 10;
2602 PPGMROMRANGE pRom = NULL;
2603 PPGMMMIO2RANGE pMmio2 = NULL;
2604
2605 /*
2606 * We batch up pages that should be freed instead of calling GMM for
2607 * each and every one of them. Note that we'll lose the pages in most
2608 * failure paths - this should probably be addressed one day.
2609 */
2610 uint32_t cPendingPages = 0;
2611 PGMMFREEPAGESREQ pReq;
2612 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2613 AssertLogRelRCReturn(rc, rc);
2614
2615 for (;;)
2616 {
2617 /*
2618 * Get the record type and flags.
2619 */
2620 uint8_t u8;
2621 rc = SSMR3GetU8(pSSM, &u8);
2622 if (RT_FAILURE(rc))
2623 return rc;
2624 if (u8 == PGM_STATE_REC_END)
2625 {
2626 /*
2627 * Finish off any pages pending freeing.
2628 */
2629 if (cPendingPages)
2630 {
2631 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2632 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2633 AssertLogRelRCReturn(rc, rc);
2634 }
2635 GMMR3FreePagesCleanup(pReq);
2636 return VINF_SUCCESS;
2637 }
2638 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2639 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2640 {
2641 /*
2642 * RAM page.
2643 */
2644 case PGM_STATE_REC_RAM_ZERO:
2645 case PGM_STATE_REC_RAM_RAW:
2646 case PGM_STATE_REC_RAM_BALLOONED:
2647 {
2648 /*
2649 * Get the address and resolve it into a page descriptor.
2650 */
2651 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2652 GCPhys += PAGE_SIZE;
2653 else
2654 {
2655 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2656 if (RT_FAILURE(rc))
2657 return rc;
2658 }
2659 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2660
2661 PPGMPAGE pPage;
2662 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2663 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2664
2665 /*
2666 * Take action according to the record type.
2667 */
2668 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2669 {
2670 case PGM_STATE_REC_RAM_ZERO:
2671 {
2672 if (PGM_PAGE_IS_ZERO(pPage))
2673 break;
2674
2675 /* Ballooned pages must be unmarked (live snapshot and
2676 teleportation scenarios). */
2677 if (PGM_PAGE_IS_BALLOONED(pPage))
2678 {
2679 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2680 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2681 break;
2682 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2683 break;
2684 }
2685
2686 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
2687
2688 /* If this is a ROM page, we must clear it and not try to
2689 free it... */
2690 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2691 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW)
2692 {
2693 PGMPAGEMAPLOCK PgMpLck;
2694 void *pvDstPage;
2695 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2696 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2697
2698 ASMMemZeroPage(pvDstPage);
2699 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2700 }
2701 /* Free it only if it's not part of a previously
2702 allocated large page (no need to clear the page). */
2703 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2704 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2705 {
2706 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);
2707 AssertRCReturn(rc, rc);
2708 }
2709 /** @todo handle large pages (see #5545) */
2710 break;
2711 }
2712
2713 case PGM_STATE_REC_RAM_BALLOONED:
2714 {
2715 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2716 if (PGM_PAGE_IS_BALLOONED(pPage))
2717 break;
2718
2719 /* We don't map ballooned pages in our shadow page tables; just free
2720 the page if allocated and mark it as ballooned. See #5515. */
2721 if (PGM_PAGE_IS_ALLOCATED(pPage))
2722 {
2723 /** @todo handle large pages + ballooning when it works. (see #5515, #5545). */
2724 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2725 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2726 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
2727
2728 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);
2729 AssertRCReturn(rc, rc);
2730 }
2731 Assert(PGM_PAGE_IS_ZERO(pPage));
2732 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2733 break;
2734 }
2735
2736 case PGM_STATE_REC_RAM_RAW:
2737 {
2738 PGMPAGEMAPLOCK PgMpLck;
2739 void *pvDstPage;
2740 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2741 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2742 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2743 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2744 if (RT_FAILURE(rc))
2745 return rc;
2746 break;
2747 }
2748
2749 default:
2750 AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
2751 }
2752 id = UINT8_MAX;
2753 break;
2754 }
2755
2756 /*
2757 * MMIO2 page.
2758 */
2759 case PGM_STATE_REC_MMIO2_RAW:
2760 case PGM_STATE_REC_MMIO2_ZERO:
2761 {
2762 /*
2763 * Get the ID + page number and resolve that into an MMIO2 page.
2764 */
2765 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2766 iPage++;
2767 else
2768 {
2769 SSMR3GetU8(pSSM, &id);
2770 rc = SSMR3GetU32(pSSM, &iPage);
2771 if (RT_FAILURE(rc))
2772 return rc;
2773 }
2774 if ( !pMmio2
2775 || pMmio2->idSavedState != id)
2776 {
2777 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
2778 if (pMmio2->idSavedState == id)
2779 break;
2780 AssertLogRelMsgReturn(pMmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
2781 }
2782 AssertLogRelMsgReturn(iPage < (pMmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_INTERNAL_ERROR);
2783 void *pvDstPage = (uint8_t *)pMmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);
2784
2785 /*
2786 * Load the page bits.
2787 */
2788 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2789 ASMMemZeroPage(pvDstPage);
2790 else
2791 {
2792 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2793 if (RT_FAILURE(rc))
2794 return rc;
2795 }
2796 GCPhys = NIL_RTGCPHYS;
2797 break;
2798 }
2799
2800 /*
2801 * ROM pages.
2802 */
2803 case PGM_STATE_REC_ROM_VIRGIN:
2804 case PGM_STATE_REC_ROM_SHW_RAW:
2805 case PGM_STATE_REC_ROM_SHW_ZERO:
2806 case PGM_STATE_REC_ROM_PROT:
2807 {
2808 /*
2809 * Get the ID + page number and resolve that into a ROM page descriptor.
2810 */
2811 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2812 iPage++;
2813 else
2814 {
2815 SSMR3GetU8(pSSM, &id);
2816 rc = SSMR3GetU32(pSSM, &iPage);
2817 if (RT_FAILURE(rc))
2818 return rc;
2819 }
2820 if ( !pRom
2821 || pRom->idSavedState != id)
2822 {
2823 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2824 if (pRom->idSavedState == id)
2825 break;
2826 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
2827 }
2828 AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_INTERNAL_ERROR);
2829 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2830 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
2831
2832 /*
2833 * Get and set the protection.
2834 */
2835 uint8_t u8Prot;
2836 rc = SSMR3GetU8(pSSM, &u8Prot);
2837 if (RT_FAILURE(rc))
2838 return rc;
2839 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2840 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_INTERNAL_ERROR);
2841
2842 if (enmProt != pRomPage->enmProt)
2843 {
2844 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2845 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2846 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2847 GCPhys, enmProt, pRom->pszDesc);
2848 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2849 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2850 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
2851 }
2852 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2853 break; /* done */
2854
2855 /*
2856 * Get the right page descriptor.
2857 */
2858 PPGMPAGE pRealPage;
2859 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2860 {
2861 case PGM_STATE_REC_ROM_VIRGIN:
2862 if (!PGMROMPROT_IS_ROM(enmProt))
2863 pRealPage = &pRomPage->Virgin;
2864 else
2865 pRealPage = NULL;
2866 break;
2867
2868 case PGM_STATE_REC_ROM_SHW_RAW:
2869 case PGM_STATE_REC_ROM_SHW_ZERO:
2870 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2871 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2872 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
2873 GCPhys, enmProt, pRom->pszDesc);
2874 if (PGMROMPROT_IS_ROM(enmProt))
2875 pRealPage = &pRomPage->Shadow;
2876 else
2877 pRealPage = NULL;
2878 break;
2879
2880 default: AssertLogRelFailedReturn(VERR_INTERNAL_ERROR); /* shut up gcc */
2881 }
2882 if (!pRealPage)
2883 {
2884 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
2885 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2886 }
2887
2888 /*
2889 * Make it writable and map it (if necessary).
2890 */
2891 void *pvDstPage = NULL;
2892 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2893 {
2894 case PGM_STATE_REC_ROM_SHW_ZERO:
2895 if ( PGM_PAGE_IS_ZERO(pRealPage)
2896 || PGM_PAGE_IS_BALLOONED(pRealPage))
2897 break;
2898 /** @todo implement zero page replacing. */
2899 /* fall thru */
2900 case PGM_STATE_REC_ROM_VIRGIN:
2901 case PGM_STATE_REC_ROM_SHW_RAW:
2902 {
2903 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
2904 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2905 break;
2906 }
2907 }
2908
2909 /*
2910 * Load the bits.
2911 */
2912 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2913 {
2914 case PGM_STATE_REC_ROM_SHW_ZERO:
2915 if (pvDstPage)
2916 ASMMemZeroPage(pvDstPage);
2917 break;
2918
2919 case PGM_STATE_REC_ROM_VIRGIN:
2920 case PGM_STATE_REC_ROM_SHW_RAW:
2921 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2922 if (RT_FAILURE(rc))
2923 return rc;
2924 break;
2925 }
2926 GCPhys = NIL_RTGCPHYS;
2927 break;
2928 }
2929
2930 /*
2931 * Unknown type.
2932 */
2933 default:
2934 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
2935 }
2936 } /* forever */
2937}
2938
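/*
 * Example of the sparse record stream consumed above (hypothetical addresses;
 * PGM_STATE_REC_FLAG_ADDR marks records carrying an explicit address or id):
 *
 *      RAM_RAW  + FLAG_ADDR, GCPhys=0x00001000, 4 KB of data
 *      RAM_RAW                                , 4 KB of data  // 0x00002000
 *      RAM_ZERO + FLAG_ADDR, GCPhys=0x00100000                // no data
 *      MMIO2_RAW + FLAG_ADDR, id, iPage=0     , 4 KB of data
 *      PGM_STATE_REC_END
 *
 * Without the flag the position simply advances one page (GCPhys += PAGE_SIZE,
 * or iPage++ for MMIO2/ROM records), keeping runs of consecutive pages compact.
 */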
2939
2940/**
2941 * Worker for pgmR3Load.
2942 *
2943 * @returns VBox status code.
2944 *
2945 * @param pVM The VM handle.
2946 * @param pSSM The SSM handle.
2947 * @param uVersion The saved state version.
2948 */
2949static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2950{
2951 PPGM pPGM = &pVM->pgm.s;
2952 int rc;
2953 uint32_t u32Sep;
2954
2955 /*
2956 * Load basic data (required / unaffected by relocation).
2957 */
2958 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2959 {
2960 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
2961 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2962 else
2963 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFieldsPreBalloon[0]);
2964
2965 AssertLogRelRCReturn(rc, rc);
2966
2967 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2968 {
2969 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
2970 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFieldsPrePae[0]);
2971 else
2972 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
2973 AssertLogRelRCReturn(rc, rc);
2974 }
2975 }
2976 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2977 {
2978 AssertRelease(pVM->cCpus == 1);
2979
2980 PGMOLD pgmOld;
2981 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2982 AssertLogRelRCReturn(rc, rc);
2983
2984 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
2985 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
2986 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
2987
2988 pVM->aCpus[0].pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2989 pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2990 pVM->aCpus[0].pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2991 }
2992 else
2993 {
2994 AssertRelease(pVM->cCpus == 1);
2995
2996 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
2997 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
2998 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
2999
3000 uint32_t cbRamSizeIgnored;
3001 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3002 if (RT_FAILURE(rc))
3003 return rc;
3004 SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
3005
3006 uint32_t u32 = 0;
3007 SSMR3GetUInt(pSSM, &u32);
3008 pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
3009 SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
3010 RTUINT uGuestMode;
3011 SSMR3GetUInt(pSSM, &uGuestMode);
3012 pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3013
3014 /* check separator. */
3015 rc = SSMR3GetU32(pSSM, &u32Sep);
3016 if (RT_FAILURE(rc))
3017 return rc;
3018 if (u32Sep != (uint32_t)~0)
3019 {
3020 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3021 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3022 }
3023 }
3024
3025 /*
3026 * The guest mappings - skipped now, see re-fixation in the caller.
3027 */
3028 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3029 {
3030 for (uint32_t i = 0; ; i++)
3031 {
3032 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3033 if (RT_FAILURE(rc))
3034 return rc;
3035 if (u32Sep == ~0U)
3036 break;
3037 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3038
3039 char szDesc[256];
3040 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3041 if (RT_FAILURE(rc))
3042 return rc;
3043 RTGCPTR GCPtrIgnore;
3044 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3045 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3046 if (RT_FAILURE(rc))
3047 return rc;
3048 }
3049 }
3050
3051 /*
3052 * Load the RAM contents.
3053 */
3054 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3055 {
3056 if (!pVM->pgm.s.LiveSave.fActive)
3057 {
3058 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3059 {
3060 rc = pgmR3LoadRamConfig(pVM, pSSM);
3061 if (RT_FAILURE(rc))
3062 return rc;
3063 }
3064 rc = pgmR3LoadRomRanges(pVM, pSSM);
3065 if (RT_FAILURE(rc))
3066 return rc;
3067 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3068 if (RT_FAILURE(rc))
3069 return rc;
3070 }
3071
3072 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3073 }
3074 else
3075 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3076
3077 /* Refresh balloon accounting. */
3078 if (pVM->pgm.s.cBalloonedPages)
3079 {
3080 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3081 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3082 AssertRCReturn(rc, rc);
3083 }
3084 return rc;
3085}
3086
3087
3088/**
3089 * Execute state load operation.
3090 *
3091 * @returns VBox status code.
3092 * @param pVM VM Handle.
3093 * @param pSSM SSM operation handle.
3094 * @param uVersion Data layout version.
3095 * @param uPass The data pass.
3096 */
3097static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3098{
3099 int rc;
3100 PPGM pPGM = &pVM->pgm.s;
3101
3102 /*
3103 * Validate version.
3104 */
3105 if ( ( uPass != SSM_PASS_FINAL
3106 && uVersion != PGM_SAVED_STATE_VERSION
3107 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3108 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3109 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3110 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3111 || ( uVersion != PGM_SAVED_STATE_VERSION
3112 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3113 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3114 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3115 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3116 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3117 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3118 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3119 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3120 )
3121 {
3122 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3123 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3124 }
3125
3126 /*
3127 * Do the loading while owning the lock because a bunch of the functions
3128 * we're using require this.
3129 */
3130 if (uPass != SSM_PASS_FINAL)
3131 {
3132 pgmLock(pVM);
3133 if (uPass != 0)
3134 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3135 else
3136 {
3137 pVM->pgm.s.LiveSave.fActive = true;
3138 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3139 rc = pgmR3LoadRamConfig(pVM, pSSM);
3140 else
3141 rc = VINF_SUCCESS;
3142 if (RT_SUCCESS(rc))
3143 rc = pgmR3LoadRomRanges(pVM, pSSM);
3144 if (RT_SUCCESS(rc))
3145 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3146 if (RT_SUCCESS(rc))
3147 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3148 }
3149 pgmUnlock(pVM);
3150 }
3151 else
3152 {
3153 pgmLock(pVM);
3154 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3155 pVM->pgm.s.LiveSave.fActive = false;
3156 pgmUnlock(pVM);
3157 if (RT_SUCCESS(rc))
3158 {
3159 /*
3160 * We require a full resync now.
3161 */
3162 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3163 {
3164 PVMCPU pVCpu = &pVM->aCpus[i];
3165 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3166 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3167 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
3168 /** @todo For guest PAE, we might get the wrong
3169 * aGCPhysGstPaePDs values now. We should use the
3170 * saved ones... Postponing this since it's nothing new
3171 * and PAE/PDPTR needs some general readjusting, see
3172 * @bugref{#5880}. */
3173 }
3174
3175 pgmR3HandlerPhysicalUpdateAll(pVM);
3176
3177 /*
3178 * Change the paging mode and restore PGMCPU::GCPhysCR3.
3179 * (The latter requires the CPUM state to be restored already.)
3180 */
3181 if (CPUMR3IsStateRestorePending(pVM))
3182 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3183 N_("PGM was unexpectedly restored before CPUM"));
3184
3185 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3186 {
3187 PVMCPU pVCpu = &pVM->aCpus[i];
3188
3189 rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
3190 AssertLogRelRCReturn(rc, rc);
3191
3192 /* Update pVM->pgm.s.GCPhysCR3. */
3193 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS || FTMIsDeltaLoadSaveActive(pVM));
3194 RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
3195 if ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
3196 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
3197 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
3198 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
3199 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
3200 else
3201 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
3202 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3203
3204 /* Update the PSE, NX flags and validity masks. */
3205 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3206 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3207 }
3208
3209 /*
3210 * Try to re-fixate the guest mappings.
3211 */
3212 pVM->pgm.s.fMappingsFixedRestored = false;
3213 if ( pVM->pgm.s.fMappingsFixed
3214 && pgmMapAreMappingsEnabled(pVM))
3215 {
3216 RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed;
3217 uint32_t cbFixed = pVM->pgm.s.cbMappingFixed;
3218 pVM->pgm.s.fMappingsFixed = false;
3219
3220 uint32_t cbRequired;
3221 int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);
3222 if ( RT_SUCCESS(rc2)
3223 && cbRequired > cbFixed)
3224 rc2 = VERR_OUT_OF_RANGE;
3225 if (RT_SUCCESS(rc2))
3226 rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);
3227 if (RT_FAILURE(rc2))
3228 {
3229 LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",
3230 GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));
3231 pVM->pgm.s.fMappingsFixed = false;
3232 pVM->pgm.s.fMappingsFixedRestored = true;
3233 pVM->pgm.s.GCPtrMappingFixed = GCPtrFixed;
3234 pVM->pgm.s.cbMappingFixed = cbFixed;
3235 }
3236 }
3237 else
3238 {
3239 /* We used to set fixed + disabled while we only use disabled now,
3240 so wipe the state to avoid any confusion. */
3241 pVM->pgm.s.fMappingsFixed = false;
3242 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;
3243 pVM->pgm.s.cbMappingFixed = 0;
3244 }
3245
3246 /*
3247 * If we have floating mappings, do a CR3 sync now to make sure the HMA
3248 * doesn't conflict with guest code / data and thereby cause trouble
3249 * when restoring other components like PATM.
3250 */
3251 if (pgmMapAreMappingsFloating(pVM))
3252 {
3253 PVMCPU pVCpu = &pVM->aCpus[0];
3254 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
3255 if (RT_FAILURE(rc))
3256 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3257 N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);
3258
3259 /* Make sure to re-sync before executing code. */
3260 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3261 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3262 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
3263 }
3264 }
3265 }
3266
3267 return rc;
3268}
3269
3270
3271/**
3272 * Registers the saved state callbacks with SSM.
3273 *
3274 * @returns VBox status code.
3275 * @param pVM Pointer to VM structure.
3276 * @param cbRam The RAM size.
3277 */
3278int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3279{
3280 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3281 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3282 NULL, pgmR3SaveExec, pgmR3SaveDone,
3283 pgmR3LoadPrep, pgmR3Load, NULL);
3284}
3285
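/*
 * The resulting callback order is therefore roughly: pgmR3LivePrep, then
 * pgmR3LiveExec + pgmR3LiveVote per live pass, then pgmR3SaveExec for the
 * final pass and pgmR3SaveDone for cleanup; a restore runs pgmR3LoadPrep
 * followed by pgmR3Load for each pass.
 */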