VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@ 81237

Last change on this file since 81237 was 80673, checked in by vboxsync, 5 years ago

PDM/DevHlp: Need to wrap the crit sect methods so we can pass on pVM to the crit sect code later, as we won't be able to store pointers in the internal critical section data anymore. bugref:9218

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 126.9 KB
Line 
1/* $Id: PGMSavedState.cpp 80673 2019-09-09 14:02:22Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/ssm.h>
26#include <VBox/vmm/pdmdrv.h>
27#include <VBox/vmm/pdmdev.h>
28#include "PGMInternal.h"
29#include <VBox/vmm/vm.h>
30#include "PGMInline.h"
31
32#include <VBox/param.h>
33#include <VBox/err.h>
34
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/crc.h>
38#include <iprt/mem.h>
39#include <iprt/sha.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION                 14
/** Saved state data unit version before the PAE PDPE registers. */
#define PGM_SAVED_STATE_VERSION_PRE_PAE         13
/** Saved state data unit version after this includes ballooned page flags in
 * the state (see @bugref{5515}). */
#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN  12
/** Saved state before the balloon change. */
#define PGM_SAVED_STATE_VERSION_PRE_BALLOON     11
/** Saved state data unit version used during 3.1 development, misses the RAM
 * config. */
#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG      10
/** Saved state data unit version for 3.0 (pre teleportation). */
#define PGM_SAVED_STATE_VERSION_3_0_0           9
/** Saved state data unit version for 2.2.2 and later. */
#define PGM_SAVED_STATE_VERSION_2_2_2           8
/** Saved state data unit version for 2.2.0. */
#define PGM_SAVED_STATE_VERSION_RR_DESC         7
/** Saved state data unit version. (Predates the 2.2.0 formats above.) */
#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6


/** @name Sparse state record types
 * @{ */
/** Zero page. No data. */
#define PGM_STATE_REC_RAM_ZERO          UINT8_C(0x00)
/** Raw page. */
#define PGM_STATE_REC_RAM_RAW           UINT8_C(0x01)
/** Raw MMIO2 page. */
#define PGM_STATE_REC_MMIO2_RAW         UINT8_C(0x02)
/** Zero MMIO2 page. */
#define PGM_STATE_REC_MMIO2_ZERO        UINT8_C(0x03)
/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
#define PGM_STATE_REC_ROM_VIRGIN        UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
#define PGM_STATE_REC_ROM_SHW_RAW       UINT8_C(0x05)
/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
#define PGM_STATE_REC_ROM_SHW_ZERO      UINT8_C(0x06)
/** ROM protection (8-bit). */
#define PGM_STATE_REC_ROM_PROT          UINT8_C(0x07)
/** Ballooned page. No data. */
#define PGM_STATE_REC_RAM_BALLOONED     UINT8_C(0x08)
/** The last record type. */
#define PGM_STATE_REC_LAST              PGM_STATE_REC_RAM_BALLOONED
/** End marker. */
#define PGM_STATE_REC_END               UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is a 8-bit
 * range ID and a 32-bit page index.
 */
#define PGM_STATE_REC_FLAG_ADDR         UINT8_C(0x80)
/** @} */

/** The CRC-32 for a zero page. */
#define PGM_STATE_CRC32_ZERO_PAGE       UINT32_C(0xc71c0011)
/** The CRC-32 for a zero half page. */
#define PGM_STATE_CRC32_ZERO_HALF_PAGE  UINT32_C(0xf1e8ba9e)
104
105
106
/** @name Old Page types used in older saved states.
 *
 * These must all be distinct so the old-format loader can tell the page
 * types apart.  (Fix: PGMPAGETYPE_OLD_MMIO2 was erroneously defined as 1,
 * colliding with PGMPAGETYPE_OLD_RAM and shifting the rest down by one.)
 * @{ */
/** Old saved state: The usual invalid zero entry. */
#define PGMPAGETYPE_OLD_INVALID             0
/** Old saved state: RAM page. (RWX) */
#define PGMPAGETYPE_OLD_RAM                 1
/** Old saved state: MMIO2 page. (RWX) */
#define PGMPAGETYPE_OLD_MMIO2               2
/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
 * See PGMHandlerPhysicalPageAlias(). */
#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO    3
/** Old saved state: Shadowed ROM. (RWX) */
#define PGMPAGETYPE_OLD_ROM_SHADOW          4
/** Old saved state: ROM page. (R-X) */
#define PGMPAGETYPE_OLD_ROM                 5
/** Old saved state: MMIO page. (---) */
#define PGMPAGETYPE_OLD_MMIO                6
/** @} */
125
126
127/*********************************************************************************************************************************
128* Structures and Typedefs *
129*********************************************************************************************************************************/
130/** For loading old saved states. (pre-smp) */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool        fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t    cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR     GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS    GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool        fA20Enabled;
    /** The guest paging mode. */
    PGMMODE     enmGuestMode;
} PGMOLD;
149
150
151/*********************************************************************************************************************************
152* Global Variables *
153*********************************************************************************************************************************/
154/** PGM fields to save/load. */
155
/** Current PGM fields (PGM_SAVED_STATE_VERSION). */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY(         PGM, cBalloonedPages),
    SSMFIELD_ENTRY_TERM()
};

/** PGM fields for states older than the ballooning change
 * (PGM_SAVED_STATE_VERSION_PRE_BALLOON and earlier) - no cBalloonedPages. */
static const SSMFIELD s_aPGMFieldsPreBalloon[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};

/** Current per-VCPU PGM fields, including the guest PAE PDPE addresses. */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[0]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[1]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[2]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[3]),
    SSMFIELD_ENTRY_TERM()
};

/** Per-VCPU PGM fields for states predating the PAE PDPE registers
 * (PGM_SAVED_STATE_VERSION_PRE_PAE and earlier). */
static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

/** Field layout of the pre-SMP PGMOLD structure (see above). */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
203
204
205/**
206 * Find the ROM tracking structure for the given page.
207 *
 * @returns Pointer to the ROM page structure.  NULL if the given address is
 *          not part of any registered ROM range.
210 * @param pVM The cross context VM structure.
211 * @param GCPhys The address of the ROM page.
212 */
213static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
214{
215 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
216 pRomRange;
217 pRomRange = pRomRange->CTX_SUFF(pNext))
218 {
219 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
220 if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
221 return &pRomRange->aPages[off >> PAGE_SHIFT];
222 }
223 return NULL;
224}
225
226
227/**
228 * Prepares the ROM pages for a live save.
229 *
230 * @returns VBox status code.
231 * @param pVM The cross context VM structure.
232 */
233static int pgmR3PrepRomPages(PVM pVM)
234{
235 /*
236 * Initialize the live save tracking in the ROM page descriptors.
237 */
238 pgmLock(pVM);
239 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
240 {
241 PPGMRAMRANGE pRamHint = NULL;;
242 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
243
244 for (uint32_t iPage = 0; iPage < cPages; iPage++)
245 {
246 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
247 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
248 pRom->aPages[iPage].LiveSave.fDirty = true;
249 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
250 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
251 {
252 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
253 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
254 else
255 {
256 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
257 PPGMPAGE pPage;
258 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
259 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
260 if (RT_SUCCESS(rc))
261 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
262 else
263 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
264 }
265 }
266 }
267
268 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
269 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
270 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
271 }
272 pgmUnlock(pVM);
273
274 return VINF_SUCCESS;
275}
276
277
278/**
279 * Assigns IDs to the ROM ranges and saves them.
280 *
281 * @returns VBox status code.
282 * @param pVM The cross context VM structure.
283 * @param pSSM Saved state handle.
284 */
285static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
286{
287 pgmLock(pVM);
288 uint8_t id = 1;
289 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
290 {
291 pRom->idSavedState = id;
292 SSMR3PutU8(pSSM, id);
293 SSMR3PutStrZ(pSSM, ""); /* device name */
294 SSMR3PutU32(pSSM, 0); /* device instance */
295 SSMR3PutU8(pSSM, 0); /* region */
296 SSMR3PutStrZ(pSSM, pRom->pszDesc);
297 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
298 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
299 if (RT_FAILURE(rc))
300 break;
301 }
302 pgmUnlock(pVM);
303 return SSMR3PutU8(pSSM, UINT8_MAX);
304}
305
306
307/**
308 * Loads the ROM range ID assignments.
309 *
310 * @returns VBox status code.
311 *
312 * @param pVM The cross context VM structure.
313 * @param pSSM The saved state handle.
314 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Invalidate all assignments first so we can detect ROM ranges that are
       missing from the saved state. */
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        pRom->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /* End marker: every configured ROM range must have been matched. */
            for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                ("The \"%s\" ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                 pRom->pszDesc));
            return VINF_SUCCESS; /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        /* The status of the intermediate gets isn't checked individually;
           presumably SSM latches errors so the checked gets below catch
           them -- TODO confirm against the SSM API contract. */
        uint32_t uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        /* Both address and size must be page aligned. */
        AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.  ROM records are always saved with
         * empty device name and zero instance/region (see pgmR3SaveRomRanges),
         * so the description string is the only key.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp %s\n", GCPhys, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        PPGMROMRANGE pRom;
        for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        {
            if (   pRom->idSavedState == UINT8_MAX
                && !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (!pRom)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
    } /* forever */
}
385
386
387/**
388 * Scan ROM pages.
389 *
390 * @param pVM The cross context VM structure.
391 */
392static void pgmR3ScanRomPages(PVM pVM)
393{
394 /*
395 * The shadow ROMs.
396 */
397 pgmLock(pVM);
398 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
399 {
400 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
401 {
402 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
403 for (uint32_t iPage = 0; iPage < cPages; iPage++)
404 {
405 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
406 if (pRomPage->LiveSave.fWrittenTo)
407 {
408 pRomPage->LiveSave.fWrittenTo = false;
409 if (!pRomPage->LiveSave.fDirty)
410 {
411 pRomPage->LiveSave.fDirty = true;
412 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
413 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
414 }
415 pRomPage->LiveSave.fDirtiedRecently = true;
416 }
417 else
418 pRomPage->LiveSave.fDirtiedRecently = false;
419 }
420 }
421 }
422 pgmUnlock(pVM);
423}
424
425
426/**
427 * Takes care of the virgin ROM pages in the first pass.
428 *
429 * This is an attempt at simplifying the handling of ROM pages a little bit.
430 * This ASSUMES that no new ROM ranges will be added and that they won't be
431 * relinked in any way.
432 *
433 * @param pVM The cross context VM structure.
434 * @param pSSM The SSM handle.
435 * @param fLiveSave Whether we're in a live save or not.
436 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor.  When the ROM is the active
               mapping it is the page at GCPhys; otherwise the virgin copy is
               kept in the ROM page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(pVM, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[PAGE_SIZE];
            if (   !PGM_PAGE_IS_ZERO(pPage)
                && !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
                rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, PAGE_SIZE);
            }
            else
                ASMMemZeroPage(abPage);
            /* Drop the PGM lock while writing to the saved state stream. */
            pgmUnlock(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it.  Only the first page of a range carries the address
               (range id + page index); the rest are implicitly consecutive. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state.  (Retake the lock for the next iteration.) */
            pgmLock(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
499
500
501/**
502 * Saves dirty pages in the shadowed ROM ranges.
503 *
504 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
505 *
506 * @returns VBox status code.
507 * @param pVM The cross context VM structure.
508 * @param pSSM The SSM handle.
509 * @param fLiveSave Whether it's a live save or not.
510 * @param fFinalPass Whether this is the final pass or not.
511 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> PAGE_SHIFT;
            uint32_t       iPrevPage = cPages;   /* "none" - can never match iPage - 1 initially */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                /* Save the page when not live saving, or - during a live save -
                   when it is dirty and either has quiesced (not dirtied or
                   written to recently) or we're in the final pass. */
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                            )
                        )
                    )
                {
                    uint8_t     abPage[PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    /* While the ROM is the active mapping the shadow copy lives
                       in the descriptor; otherwise it's the page at GCPhys. */
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
                        rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, PAGE_SIZE);
                    }
                    /* Update the accounting while we still hold the lock. */
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    /* Drop the lock while writing to the stream. */
                    pgmUnlock(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    /* Only emit the address when the page isn't consecutive
                       with the previously saved one. */
                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    pgmUnlock(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
611
612
613/**
614 * Cleans up ROM pages after a live save.
615 *
616 * @param pVM The cross context VM structure.
617 */
618static void pgmR3DoneRomPages(PVM pVM)
619{
620 NOREF(pVM);
621}
622
623
624/**
625 * Prepares the MMIO2 pages for a live save.
626 *
627 * @returns VBox status code.
628 * @param pVM The cross context VM structure.
629 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    pgmLock(pVM);
    for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
    {
        if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
        {
            uint32_t const cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
            /* Drop the lock while allocating. */
            pgmUnlock(pVM);

            PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
            if (!paLSPages)
                /* NOTE(review): tracking arrays already attached to earlier
                   ranges are left in place; presumably pgmR3DoneMmio2Pages
                   frees them on the failure path - confirm with the caller. */
                return VERR_NO_MEMORY;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                /* Initialize it as a dirty zero page. */
                paLSPages[iPage].fDirty          = true;
                paLSPages[iPage].cUnchangedScans = 0;
                paLSPages[iPage].fZero           = true;
                paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            }

            /* Retake the lock to attach the array and update the statistics. */
            pgmLock(pVM);
            pRegMmio->paLSPages = paLSPages;
            pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
665
666
667/**
668 * Assigns IDs to the MMIO2 ranges and saves them.
669 *
670 * @returns VBox status code.
671 * @param pVM The cross context VM structure.
672 * @param pSSM Saved state handle.
673 */
674static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
675{
676 pgmLock(pVM);
677 uint8_t id = 1;
678 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
679 {
680 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
681 {
682 pRegMmio->idSavedState = id;
683 SSMR3PutU8(pSSM, id);
684 SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
685 SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
686 SSMR3PutU8(pSSM, pRegMmio->iRegion);
687 SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
688 int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
689 if (RT_FAILURE(rc))
690 break;
691 id++;
692 }
693 }
694 pgmUnlock(pVM);
695 return SSMR3PutU8(pSSM, UINT8_MAX);
696}
697
698
699/**
700 * Loads the MMIO2 range ID assignments.
701 *
702 * @returns VBox status code.
703 *
704 * @param pVM The cross context VM structure.
705 * @param pSSM The saved state handle.
706 */
707static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
708{
709 PGM_LOCK_ASSERT_OWNER(pVM);
710
711 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
712 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
713 pRegMmio->idSavedState = UINT8_MAX;
714
715 for (;;)
716 {
717 /*
718 * Read the data.
719 */
720 uint8_t id;
721 int rc = SSMR3GetU8(pSSM, &id);
722 if (RT_FAILURE(rc))
723 return rc;
724 if (id == UINT8_MAX)
725 {
726 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
727 AssertLogRelMsg( pRegMmio->idSavedState != UINT8_MAX
728 || !(pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2),
729 ("%s\n", pRegMmio->RamRange.pszDesc));
730 return VINF_SUCCESS; /* the end */
731 }
732 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
733
734 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
735 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
736 AssertLogRelRCReturn(rc, rc);
737
738 uint32_t uInstance;
739 SSMR3GetU32(pSSM, &uInstance);
740 uint8_t iRegion;
741 SSMR3GetU8(pSSM, &iRegion);
742
743 char szDesc[64];
744 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
745 AssertLogRelRCReturn(rc, rc);
746
747 RTGCPHYS cb;
748 rc = SSMR3GetGCPhys(pSSM, &cb);
749 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
750
751 /*
752 * Locate a matching MMIO2 range.
753 */
754 PPGMREGMMIORANGE pRegMmio;
755 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
756 {
757 if ( pRegMmio->idSavedState == UINT8_MAX
758 && pRegMmio->iRegion == iRegion
759 && pRegMmio->pDevInsR3->iInstance == uInstance
760 && (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
761 && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
762 {
763 pRegMmio->idSavedState = id;
764 break;
765 }
766 }
767 if (!pRegMmio)
768 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
769 szDesc, szDevName, uInstance, iRegion);
770
771 /*
772 * Validate the configuration, the size of the MMIO2 region should be
773 * the same.
774 */
775 if (cb != pRegMmio->RamRange.cb)
776 {
777 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
778 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
779 if (cb > pRegMmio->RamRange.cb) /* bad idea? */
780 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
781 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
782 }
783 } /* forever */
784}
785
786
787/**
788 * Scans one MMIO2 page.
789 *
790 * @returns True if changed, false if unchanged.
791 *
792 * @param pVM The cross context VM structure.
793 * @param pbPage The page bits.
794 * @param pLSPage The live save tracking structure for the page.
795 *
796 */
797DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
798{
799 /*
800 * Special handling of zero pages.
801 */
802 bool const fZero = pLSPage->fZero;
803 if (fZero)
804 {
805 if (ASMMemIsZeroPage(pbPage))
806 {
807 /* Not modified. */
808 if (pLSPage->fDirty)
809 pLSPage->cUnchangedScans++;
810 return false;
811 }
812
813 pLSPage->fZero = false;
814 pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
815 }
816 else
817 {
818 /*
819 * CRC the first half, if it doesn't match the page is dirty and
820 * we won't check the 2nd half (we'll do that next time).
821 */
822 uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
823 if (u32CrcH1 == pLSPage->u32CrcH1)
824 {
825 uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
826 if (u32CrcH2 == pLSPage->u32CrcH2)
827 {
828 /* Probably not modified. */
829 if (pLSPage->fDirty)
830 pLSPage->cUnchangedScans++;
831 return false;
832 }
833
834 pLSPage->u32CrcH2 = u32CrcH2;
835 }
836 else
837 {
838 pLSPage->u32CrcH1 = u32CrcH1;
839 if ( u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
840 && ASMMemIsZeroPage(pbPage))
841 {
842 pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
843 pLSPage->fZero = true;
844 }
845 }
846 }
847
848 /* dirty page path */
849 pLSPage->cUnchangedScans = 0;
850 if (!pLSPage->fDirty)
851 {
852 pLSPage->fDirty = true;
853 pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
854 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
855 if (fZero)
856 pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
857 }
858 return true;
859}
860
861
862/**
863 * Scan for MMIO2 page modifications.
864 *
865 * @param pVM The cross context VM structure.
866 * @param uPass The pass number.
867 */
868static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
869{
870 /*
871 * Since this is a bit expensive we lower the scan rate after a little while.
872 */
873 if ( ( (uPass & 3) != 0
874 && uPass > 10)
875 || uPass == SSM_PASS_FINAL)
876 return;
877
878 pgmLock(pVM); /* paranoia */
879 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
880 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
881 {
882 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
883 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
884 pgmUnlock(pVM);
885
886 for (uint32_t iPage = 0; iPage < cPages; iPage++)
887 {
888 uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * PAGE_SIZE;
889 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
890 }
891
892 pgmLock(pVM);
893 }
894 pgmUnlock(pVM);
895
896}
897
898
899/**
900 * Save quiescent MMIO2 pages.
901 *
902 * @returns VBox status code.
903 * @param pVM The cross context VM structure.
904 * @param pSSM The SSM handle.
905 * @param fLiveSave Whether it's a live save or not.
906 * @param uPass The pass number.
907 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     * device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        pgmLock(pVM);
        for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
             pRegMmio && RT_SUCCESS(rc);
             pRegMmio = pRegMmio->pNextR3)
            if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
            {
                PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
                uint8_t const        *pbPage    = (uint8_t const *)pRegMmio->RamRange.pvR3;
                uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
                uint32_t              iPageLast = cPages; /* "none" - forces an address record for page 0 */
                for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
                {
                    uint8_t u8Type;
                    if (!fLiveSave)
                        /* Non-live save: everything gets written now. */
                        u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    else
                    {
                        /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */
                        if (   !paLSPages[iPage].fDirty
                            && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                        {
                            if (paLSPages[iPage].fZero)
                                continue; /* clean zero page - already saved */

                            uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                            RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
                            if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                                continue; /* identical to the saved copy - skip */
                        }
                        u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }

                    /* Only emit the address when not consecutive with the last
                       record from this range. */
                    if (iPage != 0 && iPage == iPageLast + 1)
                        rc = SSMR3PutU8(pSSM, u8Type);
                    else
                    {
                        SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRegMmio->idSavedState);
                        rc = SSMR3PutU32(pSSM, iPage);
                    }
                    if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                        rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        break;
                    iPageLast = iPage;
                }
            }
        pgmUnlock(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        pgmLock(pVM);
        for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
             pRegMmio && RT_SUCCESS(rc);
             pRegMmio = pRegMmio->pNextR3)
            if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
            {
                PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
                uint8_t const        *pbPage    = (uint8_t const *)pRegMmio->RamRange.pvR3;
                uint32_t              cPages    = pRegMmio->RamRange.cb >> PAGE_SHIFT;
                uint32_t              iPageLast = cPages; /* "none" - forces an address record for page 0 */
                /* Drop the lock while scanning/writing this range. */
                pgmUnlock(pVM);

                for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
                {
                    /* Skip clean pages and pages which haven't quiesced yet. */
                    if (!paLSPages[iPage].fDirty)
                        continue;
                    if (paLSPages[iPage].cUnchangedScans < 3)
                        continue;
                    if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                        continue; /* changed again just now - not quiet, skip */

                    /* Save it.  Snapshot the page bits and record the SHA-1 so
                       the final pass can prove the page is still clean. */
                    bool const fZero = paLSPages[iPage].fZero;
                    uint8_t abPage[PAGE_SIZE];
                    if (!fZero)
                    {
                        memcpy(abPage, pbPage, PAGE_SIZE);
                        RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                    }

                    uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    if (iPage != 0 && iPage == iPageLast + 1)
                        rc = SSMR3PutU8(pSSM, u8Type);
                    else
                    {
                        SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRegMmio->idSavedState);
                        rc = SSMR3PutU32(pSSM, iPage);
                    }
                    if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        break;

                    /* Housekeeping. */
                    paLSPages[iPage].fDirty = false;
                    pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                    pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                    if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                        pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                    iPageLast = iPage;
                }

                pgmLock(pVM);
            }
        pgmUnlock(pVM);
    }

    return rc;
}
1039
1040
1041/**
1042 * Cleans up MMIO2 pages after a live save.
1043 *
1044 * @param pVM The cross context VM structure.
1045 */
1046static void pgmR3DoneMmio2Pages(PVM pVM)
1047{
1048 /*
1049 * Free the tracking structures for the MMIO2 pages.
1050 * We do the freeing outside the lock in case the VM is running.
1051 */
1052 pgmLock(pVM);
1053 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
1054 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
1055 {
1056 void *pvMmio2ToFree = pRegMmio->paLSPages;
1057 if (pvMmio2ToFree)
1058 {
1059 pRegMmio->paLSPages = NULL;
1060 pgmUnlock(pVM);
1061 MMR3HeapFree(pvMmio2ToFree);
1062 pgmLock(pVM);
1063 }
1064 }
1065 pgmUnlock(pVM);
1066}
1067
1068
1069/**
1070 * Prepares the RAM pages for a live save.
1071 *
1072 * @returns VBox status code.
1073 * @param pVM The cross context VM structure.
1074 */
1075static int pgmR3PrepRamPages(PVM pVM)
1076{
1077
1078 /*
1079 * Try allocating tracking structures for the ram ranges.
1080 *
1081 * To avoid lock contention, we leave the lock every time we're allocating
1082 * a new array. This means we'll have to ditch the allocation and start
1083 * all over again if the RAM range list changes in-between.
1084 *
1085 * Note! pgmR3SaveDone will always be called and it is therefore responsible
1086 * for cleaning up.
1087 */
1088 PPGMRAMRANGE pCur;
1089 pgmLock(pVM);
1090 do
1091 {
1092 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1093 {
1094 if ( !pCur->paLSPages
1095 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1096 {
1097 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1098 uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
1099 pgmUnlock(pVM);
1100 PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
1101 if (!paLSPages)
1102 return VERR_NO_MEMORY;
1103 pgmLock(pVM);
1104 if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1105 {
1106 pgmUnlock(pVM);
1107 MMR3HeapFree(paLSPages);
1108 pgmLock(pVM);
1109 break; /* try again */
1110 }
1111 pCur->paLSPages = paLSPages;
1112
1113 /*
1114 * Initialize the array.
1115 */
1116 uint32_t iPage = cPages;
1117 while (iPage-- > 0)
1118 {
1119 /** @todo yield critsect! (after moving this away from EMT0) */
1120 PCPGMPAGE pPage = &pCur->aPages[iPage];
1121 paLSPages[iPage].cDirtied = 0;
1122 paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
1123 paLSPages[iPage].fWriteMonitored = 0;
1124 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1125 paLSPages[iPage].u2Reserved = 0;
1126 switch (PGM_PAGE_GET_TYPE(pPage))
1127 {
1128 case PGMPAGETYPE_RAM:
1129 if ( PGM_PAGE_IS_ZERO(pPage)
1130 || PGM_PAGE_IS_BALLOONED(pPage))
1131 {
1132 paLSPages[iPage].fZero = 1;
1133 paLSPages[iPage].fShared = 0;
1134#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1135 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1136#endif
1137 }
1138 else if (PGM_PAGE_IS_SHARED(pPage))
1139 {
1140 paLSPages[iPage].fZero = 0;
1141 paLSPages[iPage].fShared = 1;
1142#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1143 paLSPages[iPage].u32Crc = UINT32_MAX;
1144#endif
1145 }
1146 else
1147 {
1148 paLSPages[iPage].fZero = 0;
1149 paLSPages[iPage].fShared = 0;
1150#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1151 paLSPages[iPage].u32Crc = UINT32_MAX;
1152#endif
1153 }
1154 paLSPages[iPage].fIgnore = 0;
1155 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1156 break;
1157
1158 case PGMPAGETYPE_ROM_SHADOW:
1159 case PGMPAGETYPE_ROM:
1160 {
1161 paLSPages[iPage].fZero = 0;
1162 paLSPages[iPage].fShared = 0;
1163 paLSPages[iPage].fDirty = 0;
1164 paLSPages[iPage].fIgnore = 1;
1165#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1166 paLSPages[iPage].u32Crc = UINT32_MAX;
1167#endif
1168 pVM->pgm.s.LiveSave.cIgnoredPages++;
1169 break;
1170 }
1171
1172 default:
1173 AssertMsgFailed(("%R[pgmpage]", pPage));
1174 RT_FALL_THRU();
1175 case PGMPAGETYPE_MMIO2:
1176 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1177 paLSPages[iPage].fZero = 0;
1178 paLSPages[iPage].fShared = 0;
1179 paLSPages[iPage].fDirty = 0;
1180 paLSPages[iPage].fIgnore = 1;
1181#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1182 paLSPages[iPage].u32Crc = UINT32_MAX;
1183#endif
1184 pVM->pgm.s.LiveSave.cIgnoredPages++;
1185 break;
1186
1187 case PGMPAGETYPE_MMIO:
1188 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
1189 paLSPages[iPage].fZero = 0;
1190 paLSPages[iPage].fShared = 0;
1191 paLSPages[iPage].fDirty = 0;
1192 paLSPages[iPage].fIgnore = 1;
1193#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1194 paLSPages[iPage].u32Crc = UINT32_MAX;
1195#endif
1196 pVM->pgm.s.LiveSave.cIgnoredPages++;
1197 break;
1198 }
1199 }
1200 }
1201 }
1202 } while (pCur);
1203 pgmUnlock(pVM);
1204
1205 return VINF_SUCCESS;
1206}
1207
1208
1209/**
1210 * Saves the RAM configuration.
1211 *
1212 * @returns VBox status code.
1213 * @param pVM The cross context VM structure.
1214 * @param pSSM The saved state handle.
1215 */
1216static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
1217{
1218 uint32_t cbRamHole = 0;
1219 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
1220 AssertRCReturn(rc, rc);
1221
1222 uint64_t cbRam = 0;
1223 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
1224 AssertRCReturn(rc, rc);
1225
1226 SSMR3PutU32(pSSM, cbRamHole);
1227 return SSMR3PutU64(pSSM, cbRam);
1228}
1229
1230
1231/**
1232 * Loads and verifies the RAM configuration.
1233 *
1234 * @returns VBox status code.
1235 * @param pVM The cross context VM structure.
1236 * @param pSSM The saved state handle.
1237 */
1238static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
1239{
1240 uint32_t cbRamHoleCfg = 0;
1241 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
1242 AssertRCReturn(rc, rc);
1243
1244 uint64_t cbRamCfg = 0;
1245 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
1246 AssertRCReturn(rc, rc);
1247
1248 uint32_t cbRamHoleSaved;
1249 SSMR3GetU32(pSSM, &cbRamHoleSaved);
1250
1251 uint64_t cbRamSaved;
1252 rc = SSMR3GetU64(pSSM, &cbRamSaved);
1253 AssertRCReturn(rc, rc);
1254
1255 if ( cbRamHoleCfg != cbRamHoleSaved
1256 || cbRamCfg != cbRamSaved)
1257 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
1258 cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
1259 return VINF_SUCCESS;
1260}
1261
1262#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1263
1264/**
1265 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1266 * info with it.
1267 *
1268 * @param pVM The cross context VM structure.
1269 * @param pCur The current RAM range.
1270 * @param paLSPages The current array of live save page tracking
1271 * structures.
1272 * @param iPage The page index.
1273 */
1274static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1275{
1276 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1277 PGMPAGEMAPLOCK PgMpLck;
1278 void const *pvPage;
1279 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1280 if (RT_SUCCESS(rc))
1281 {
1282 paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1283 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1284 }
1285 else
1286 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1287}
1288
1289
1290/**
1291 * Verifies the CRC-32 for a page given it's raw bits.
1292 *
1293 * @param pvPage The page bits.
1294 * @param pCur The current RAM range.
1295 * @param paLSPages The current array of live save page tracking
1296 * structures.
1297 * @param iPage The page index.
1298 */
1299static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1300{
1301 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1302 {
1303 uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1304 Assert( ( !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
1305 && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
1306 || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
1307 AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
1308 ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
1309 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
1310 }
1311}
1312
1313
1314/**
1315 * Verifies the CRC-32 for a RAM page.
1316 *
1317 * @param pVM The cross context VM structure.
1318 * @param pCur The current RAM range.
1319 * @param paLSPages The current array of live save page tracking
1320 * structures.
1321 * @param iPage The page index.
1322 */
1323static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1324{
1325 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1326 {
1327 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1328 PGMPAGEMAPLOCK PgMpLck;
1329 void const *pvPage;
1330 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1331 if (RT_SUCCESS(rc))
1332 {
1333 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
1334 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1335 }
1336 }
1337}
1338
1339#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1340
1341/**
1342 * Scan for RAM page modifications and reprotect them.
1343 *
1344 * @param pVM The cross context VM structure.
1345 * @param fFinalPass Whether this is the final pass or not.
1346 */
1347static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
1348{
1349 /*
1350 * The RAM.
1351 */
1352 RTGCPHYS GCPhysCur = 0;
1353 PPGMRAMRANGE pCur;
1354 pgmLock(pVM);
1355 do
1356 {
1357 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1358 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1359 {
1360 if ( pCur->GCPhysLast > GCPhysCur
1361 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1362 {
1363 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1364 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1365 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1366 GCPhysCur = 0;
1367 for (; iPage < cPages; iPage++)
1368 {
1369 /* Do yield first. */
1370 if ( !fFinalPass
1371#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
1372 && (iPage & 0x7ff) == 0x100
1373#endif
1374 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1375 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1376 {
1377 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1378 break; /* restart */
1379 }
1380
1381 /* Skip already ignored pages. */
1382 if (paLSPages[iPage].fIgnore)
1383 continue;
1384
1385 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1386 {
1387 /*
1388 * A RAM page.
1389 */
1390 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1391 {
1392 case PGM_PAGE_STATE_ALLOCATED:
1393 /** @todo Optimize this: Don't always re-enable write
1394 * monitoring if the page is known to be very busy. */
1395 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1396 {
1397 AssertMsg(paLSPages[iPage].fWriteMonitored,
1398 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
1399 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1400 Assert(pVM->pgm.s.cWrittenToPages > 0);
1401 pVM->pgm.s.cWrittenToPages--;
1402 }
1403 else
1404 {
1405 AssertMsg(!paLSPages[iPage].fWriteMonitored,
1406 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
1407 pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
1408 }
1409
1410 if (!paLSPages[iPage].fDirty)
1411 {
1412 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1413 if (paLSPages[iPage].fZero)
1414 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1415 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1416 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1417 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1418 }
1419
1420 pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
1421 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1422 paLSPages[iPage].fWriteMonitored = 1;
1423 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1424 paLSPages[iPage].fDirty = 1;
1425 paLSPages[iPage].fZero = 0;
1426 paLSPages[iPage].fShared = 0;
1427#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1428 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1429#endif
1430 break;
1431
1432 case PGM_PAGE_STATE_WRITE_MONITORED:
1433 Assert(paLSPages[iPage].fWriteMonitored);
1434 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1435 {
1436#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1437 if (paLSPages[iPage].fWriteMonitoredJustNow)
1438 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1439 else
1440 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
1441#endif
1442 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1443 }
1444 else
1445 {
1446 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1447#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1448 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1449#endif
1450 if (!paLSPages[iPage].fDirty)
1451 {
1452 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1453 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1454 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1455 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1456 }
1457 }
1458 break;
1459
1460 case PGM_PAGE_STATE_ZERO:
1461 case PGM_PAGE_STATE_BALLOONED:
1462 if (!paLSPages[iPage].fZero)
1463 {
1464 if (!paLSPages[iPage].fDirty)
1465 {
1466 paLSPages[iPage].fDirty = 1;
1467 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1468 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1469 }
1470 paLSPages[iPage].fZero = 1;
1471 paLSPages[iPage].fShared = 0;
1472#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1473 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1474#endif
1475 }
1476 break;
1477
1478 case PGM_PAGE_STATE_SHARED:
1479 if (!paLSPages[iPage].fShared)
1480 {
1481 if (!paLSPages[iPage].fDirty)
1482 {
1483 paLSPages[iPage].fDirty = 1;
1484 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1485 if (paLSPages[iPage].fZero)
1486 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1487 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1488 }
1489 paLSPages[iPage].fZero = 0;
1490 paLSPages[iPage].fShared = 1;
1491#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1492 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1493#endif
1494 }
1495 break;
1496 }
1497 }
1498 else
1499 {
1500 /*
1501 * All other types => Ignore the page.
1502 */
1503 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1504 paLSPages[iPage].fIgnore = 1;
1505 if (paLSPages[iPage].fWriteMonitored)
1506 {
1507 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1508 * pages! */
1509 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1510 {
1511 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1512 PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1513 Assert(pVM->pgm.s.cMonitoredPages > 0);
1514 pVM->pgm.s.cMonitoredPages--;
1515 }
1516 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1517 {
1518 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1519 Assert(pVM->pgm.s.cWrittenToPages > 0);
1520 pVM->pgm.s.cWrittenToPages--;
1521 }
1522 pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
1523 }
1524
1525 /** @todo the counting doesn't quite work out here. fix later? */
1526 if (paLSPages[iPage].fDirty)
1527 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1528 else
1529 {
1530 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1531 if (paLSPages[iPage].fZero)
1532 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1533 }
1534 pVM->pgm.s.LiveSave.cIgnoredPages++;
1535 }
1536 } /* for each page in range */
1537
1538 if (GCPhysCur != 0)
1539 break; /* Yield + ramrange change */
1540 GCPhysCur = pCur->GCPhysLast;
1541 }
1542 } /* for each range */
1543 } while (pCur);
1544 pgmUnlock(pVM);
1545}
1546
1547
1548/**
1549 * Save quiescent RAM pages.
1550 *
1551 * @returns VBox status code.
1552 * @param pVM The cross context VM structure.
1553 * @param pSSM The SSM handle.
1554 * @param fLiveSave Whether it's a live save or not.
1555 * @param uPass The pass number.
1556 */
1557static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1558{
1559 NOREF(fLiveSave);
1560
1561 /*
1562 * The RAM.
1563 */
1564 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1565 RTGCPHYS GCPhysCur = 0;
1566 PPGMRAMRANGE pCur;
1567
1568 pgmLock(pVM);
1569 do
1570 {
1571 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1572 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1573 {
1574 if ( pCur->GCPhysLast > GCPhysCur
1575 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1576 {
1577 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1578 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1579 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1580 GCPhysCur = 0;
1581 for (; iPage < cPages; iPage++)
1582 {
1583 /* Do yield first. */
1584 if ( uPass != SSM_PASS_FINAL
1585 && (iPage & 0x7ff) == 0x100
1586 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1587 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1588 {
1589 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1590 break; /* restart */
1591 }
1592
1593 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1594
1595 /*
1596 * Only save pages that haven't changed since last scan and are dirty.
1597 */
1598 if ( uPass != SSM_PASS_FINAL
1599 && paLSPages)
1600 {
1601 if (!paLSPages[iPage].fDirty)
1602 continue;
1603 if (paLSPages[iPage].fWriteMonitoredJustNow)
1604 continue;
1605 if (paLSPages[iPage].fIgnore)
1606 continue;
1607 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1608 continue;
1609 if ( PGM_PAGE_GET_STATE(pCurPage)
1610 != ( paLSPages[iPage].fZero
1611 ? PGM_PAGE_STATE_ZERO
1612 : paLSPages[iPage].fShared
1613 ? PGM_PAGE_STATE_SHARED
1614 : PGM_PAGE_STATE_WRITE_MONITORED))
1615 continue;
1616 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1617 continue;
1618 }
1619 else
1620 {
1621 if ( paLSPages
1622 && !paLSPages[iPage].fDirty
1623 && !paLSPages[iPage].fIgnore)
1624 {
1625#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1626 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1627 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1628#endif
1629 continue;
1630 }
1631 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1632 continue;
1633 }
1634
1635 /*
1636 * Do the saving outside the PGM critsect since SSM may block on I/O.
1637 */
1638 int rc;
1639 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1640 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1641 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1642 bool fSkipped = false;
1643
1644 if (!fZero && !fBallooned)
1645 {
1646 /*
1647 * Copy the page and then save it outside the lock (since any
1648 * SSM call may block).
1649 */
1650 uint8_t abPage[PAGE_SIZE];
1651 PGMPAGEMAPLOCK PgMpLck;
1652 void const *pvPage;
1653 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1654 if (RT_SUCCESS(rc))
1655 {
1656 memcpy(abPage, pvPage, PAGE_SIZE);
1657#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1658 if (paLSPages)
1659 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1660#endif
1661 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1662 }
1663 pgmUnlock(pVM);
1664 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1665
1666 /* Try save some memory when restoring. */
1667 if (!ASMMemIsZeroPage(pvPage))
1668 {
1669 if (GCPhys == GCPhysLast + PAGE_SIZE)
1670 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1671 else
1672 {
1673 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1674 SSMR3PutGCPhys(pSSM, GCPhys);
1675 }
1676 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
1677 }
1678 else
1679 {
1680 if (GCPhys == GCPhysLast + PAGE_SIZE)
1681 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1682 else
1683 {
1684 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1685 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1686 }
1687 }
1688 }
1689 else
1690 {
1691 /*
1692 * Dirty zero or ballooned page.
1693 */
1694#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1695 if (paLSPages)
1696 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1697#endif
1698 pgmUnlock(pVM);
1699
1700 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1701 if (GCPhys == GCPhysLast + PAGE_SIZE)
1702 rc = SSMR3PutU8(pSSM, u8RecType);
1703 else
1704 {
1705 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1706 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1707 }
1708 }
1709 if (RT_FAILURE(rc))
1710 return rc;
1711
1712 pgmLock(pVM);
1713 if (!fSkipped)
1714 GCPhysLast = GCPhys;
1715 if (paLSPages)
1716 {
1717 paLSPages[iPage].fDirty = 0;
1718 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1719 if (fZero)
1720 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1721 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1722 pVM->pgm.s.LiveSave.cSavedPages++;
1723 }
1724 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1725 {
1726 GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
1727 break; /* restart */
1728 }
1729
1730 } /* for each page in range */
1731
1732 if (GCPhysCur != 0)
1733 break; /* Yield + ramrange change */
1734 GCPhysCur = pCur->GCPhysLast;
1735 }
1736 } /* for each range */
1737 } while (pCur);
1738
1739 pgmUnlock(pVM);
1740
1741 return VINF_SUCCESS;
1742}
1743
1744
1745/**
1746 * Cleans up RAM pages after a live save.
1747 *
1748 * @param pVM The cross context VM structure.
1749 */
1750static void pgmR3DoneRamPages(PVM pVM)
1751{
1752 /*
1753 * Free the tracking arrays and disable write monitoring.
1754 *
1755 * Play nice with the PGM lock in case we're called while the VM is still
1756 * running. This means we have to delay the freeing since we wish to use
1757 * paLSPages as an indicator of which RAM ranges which we need to scan for
1758 * write monitored pages.
1759 */
1760 void *pvToFree = NULL;
1761 PPGMRAMRANGE pCur;
1762 uint32_t cMonitoredPages = 0;
1763 pgmLock(pVM);
1764 do
1765 {
1766 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1767 {
1768 if (pCur->paLSPages)
1769 {
1770 if (pvToFree)
1771 {
1772 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1773 pgmUnlock(pVM);
1774 MMR3HeapFree(pvToFree);
1775 pvToFree = NULL;
1776 pgmLock(pVM);
1777 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1778 break; /* start over again. */
1779 }
1780
1781 pvToFree = pCur->paLSPages;
1782 pCur->paLSPages = NULL;
1783
1784 uint32_t iPage = pCur->cb >> PAGE_SHIFT;
1785 while (iPage--)
1786 {
1787 PPGMPAGE pPage = &pCur->aPages[iPage];
1788 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1789 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1790 {
1791 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1792 cMonitoredPages++;
1793 }
1794 }
1795 }
1796 }
1797 } while (pCur);
1798
1799 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1800 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1801 pVM->pgm.s.cMonitoredPages = 0;
1802 else
1803 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1804
1805 pgmUnlock(pVM);
1806
1807 MMR3HeapFree(pvToFree);
1808 pvToFree = NULL;
1809}
1810
1811
1812/**
1813 * @callback_method_impl{FNSSMINTLIVEEXEC}
1814 */
1815static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1816{
1817 int rc;
1818
1819 /*
1820 * Save the MMIO2 and ROM range IDs in pass 0.
1821 */
1822 if (uPass == 0)
1823 {
1824 rc = pgmR3SaveRamConfig(pVM, pSSM);
1825 if (RT_FAILURE(rc))
1826 return rc;
1827 rc = pgmR3SaveRomRanges(pVM, pSSM);
1828 if (RT_FAILURE(rc))
1829 return rc;
1830 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1831 if (RT_FAILURE(rc))
1832 return rc;
1833 }
1834 /*
1835 * Reset the page-per-second estimate to avoid inflation by the initial
1836 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1837 */
1838 else if (uPass == 7)
1839 {
1840 pVM->pgm.s.LiveSave.cSavedPages = 0;
1841 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1842 }
1843
1844 /*
1845 * Do the scanning.
1846 */
1847 pgmR3ScanRomPages(pVM);
1848 pgmR3ScanMmio2Pages(pVM, uPass);
1849 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1850 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1851
1852 /*
1853 * Save the pages.
1854 */
1855 if (uPass == 0)
1856 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1857 else
1858 rc = VINF_SUCCESS;
1859 if (RT_SUCCESS(rc))
1860 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1861 if (RT_SUCCESS(rc))
1862 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1863 if (RT_SUCCESS(rc))
1864 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1865 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1866
1867 return rc;
1868}
1869
1870
1871/**
1872 * @callback_method_impl{FNSSMINTLIVEVOTE}
1873 */
1874static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1875{
1876 /*
1877 * Update and calculate parameters used in the decision making.
1878 */
1879 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1880
1881 /* update history. */
1882 pgmLock(pVM);
1883 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1884 pgmUnlock(pVM);
1885 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1886 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1887 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1888 + cWrittenToPages;
1889 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1890 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1891 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1892
1893 /* calc shortterm average (4 passes). */
1894 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1895 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1896 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1897 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1898 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1899 uint32_t const cDirtyPagesShort = cTotal / 4;
1900 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1901
1902 /* calc longterm average. */
1903 cTotal = 0;
1904 if (uPass < cHistoryEntries)
1905 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1906 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1907 else
1908 for (i = 0; i < cHistoryEntries; i++)
1909 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1910 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
1911 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
1912
1913 /* estimate the speed */
1914 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
1915 uint32_t cPagesPerSecond = (uint32_t)( pVM->pgm.s.LiveSave.cSavedPages
1916 / ((long double)cNsElapsed / 1000000000.0) );
1917 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
1918
1919 /*
1920 * Try make a decision.
1921 */
1922 if ( cDirtyPagesShort <= cDirtyPagesLong
1923 && ( cDirtyNow <= cDirtyPagesShort
1924 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
1925 )
1926 )
1927 {
1928 if (uPass > 10)
1929 {
1930 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
1931 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
1932 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
1933 if (cMsMaxDowntime < 32)
1934 cMsMaxDowntime = 32;
1935 if ( ( cMsLeftLong <= cMsMaxDowntime
1936 && cMsLeftShort < cMsMaxDowntime)
1937 || cMsLeftShort < cMsMaxDowntime / 2
1938 )
1939 {
1940 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
1941 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
1942 return VINF_SUCCESS;
1943 }
1944 }
1945 else
1946 {
1947 if ( ( cDirtyPagesShort <= 128
1948 && cDirtyPagesLong <= 1024)
1949 || cDirtyPagesLong <= 256
1950 )
1951 {
1952 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
1953 return VINF_SUCCESS;
1954 }
1955 }
1956 }
1957
1958 /*
1959 * Come up with a completion percentage. Currently this is a simple
1960 * dirty page (long term) vs. total pages ratio + some pass trickery.
1961 */
1962 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong
1963 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
1964 if (uPctDirty <= 100)
1965 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
1966 else
1967 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
1968 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
1969
1970 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1971}
1972
1973
1974/**
1975 * @callback_method_impl{FNSSMINTLIVEPREP}
1976 *
1977 * This will attempt to allocate and initialize the tracking structures. It
1978 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1979 * pgmR3SaveDone will do the cleanups.
1980 */
1981static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
1982{
1983 /*
1984 * Indicate that we will be using the write monitoring.
1985 */
1986 pgmLock(pVM);
1987 /** @todo find a way of mediating this when more users are added. */
1988 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
1989 {
1990 pgmUnlock(pVM);
1991 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
1992 }
1993 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
1994 pgmUnlock(pVM);
1995
1996 /*
1997 * Initialize the statistics.
1998 */
1999 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2000 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2001 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2002 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2003 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2004 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2005 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2006 pVM->pgm.s.LiveSave.fActive = true;
2007 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2008 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2009 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2010 pVM->pgm.s.LiveSave.cSavedPages = 0;
2011 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2012 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2013
2014 /*
2015 * Per page type.
2016 */
2017 int rc = pgmR3PrepRomPages(pVM);
2018 if (RT_SUCCESS(rc))
2019 rc = pgmR3PrepMmio2Pages(pVM);
2020 if (RT_SUCCESS(rc))
2021 rc = pgmR3PrepRamPages(pVM);
2022
2023 NOREF(pSSM);
2024 return rc;
2025}
2026
2027
2028/**
2029 * @callback_method_impl{FNSSMINTSAVEEXEC}
2030 */
2031static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2032{
2033 int rc = VINF_SUCCESS;
2034 PPGM pPGM = &pVM->pgm.s;
2035
2036 /*
2037 * Lock PGM and set the no-more-writes indicator.
2038 */
2039 pgmLock(pVM);
2040 pVM->pgm.s.fNoMorePhysWrites = true;
2041
2042 /*
2043 * Save basic data (required / unaffected by relocation).
2044 */
2045 bool const fMappingsFixed = pVM->pgm.s.fMappingsFixed;
2046 pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;
2047 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
2048 pVM->pgm.s.fMappingsFixed = fMappingsFixed;
2049
2050 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2051 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2052
2053 /*
2054 * Save the (remainder of the) memory.
2055 */
2056 if (RT_SUCCESS(rc))
2057 {
2058 if (pVM->pgm.s.LiveSave.fActive)
2059 {
2060 pgmR3ScanRomPages(pVM);
2061 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2062 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2063
2064 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2065 if (RT_SUCCESS(rc))
2066 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2067 if (RT_SUCCESS(rc))
2068 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2069 }
2070 else
2071 {
2072 rc = pgmR3SaveRamConfig(pVM, pSSM);
2073 if (RT_SUCCESS(rc))
2074 rc = pgmR3SaveRomRanges(pVM, pSSM);
2075 if (RT_SUCCESS(rc))
2076 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2077 if (RT_SUCCESS(rc))
2078 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2079 if (RT_SUCCESS(rc))
2080 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2081 if (RT_SUCCESS(rc))
2082 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2083 if (RT_SUCCESS(rc))
2084 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2085 }
2086 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes of it.) */
2087 }
2088
2089 pgmUnlock(pVM);
2090 return rc;
2091}
2092
2093
2094/**
2095 * @callback_method_impl{FNSSMINTSAVEDONE}
2096 */
2097static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2098{
2099 /*
2100 * Do per page type cleanups first.
2101 */
2102 if (pVM->pgm.s.LiveSave.fActive)
2103 {
2104 pgmR3DoneRomPages(pVM);
2105 pgmR3DoneMmio2Pages(pVM);
2106 pgmR3DoneRamPages(pVM);
2107 }
2108
2109 /*
2110 * Clear the live save indicator and disengage write monitoring.
2111 */
2112 pgmLock(pVM);
2113 pVM->pgm.s.LiveSave.fActive = false;
2114 /** @todo this is blindly assuming that we're the only user of write
2115 * monitoring. Fix this when more users are added. */
2116 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2117 pgmUnlock(pVM);
2118
2119 NOREF(pSSM);
2120 return VINF_SUCCESS;
2121}
2122
2123
2124/**
2125 * @callback_method_impl{FNSSMINTLOADPREP}
2126 */
2127static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2128{
2129 /*
2130 * Call the reset function to make sure all the memory is cleared.
2131 */
2132 PGMR3Reset(pVM);
2133 pVM->pgm.s.LiveSave.fActive = false;
2134 NOREF(pSSM);
2135 return VINF_SUCCESS;
2136}
2137
2138
2139/**
2140 * Load an ignored page.
2141 *
2142 * @returns VBox status code.
2143 * @param pSSM The saved state handle.
2144 */
2145static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2146{
2147 uint8_t abPage[PAGE_SIZE];
2148 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2149}
2150
2151
2152/**
2153 * Compares a page with an old save type value.
2154 *
2155 * @returns true if equal, false if not.
2156 * @param pPage The page to compare.
2157 * @param uOldType The old type value from the saved state.
2158 */
2159DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2160{
2161 uint8_t uOldPageType;
2162 switch (PGM_PAGE_GET_TYPE(pPage))
2163 {
2164 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2165 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2166 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2167 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2168 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2169 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2170 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2171 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2172 default:
2173 AssertFailed();
2174 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2175 break;
2176 }
2177 return uOldPageType == uOldType;
2178}
2179
2180
2181/**
2182 * Loads a page without any bits in the saved state, i.e. making sure it's
2183 * really zero.
2184 *
2185 * @returns VBox status code.
2186 * @param pVM The cross context VM structure.
2187 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2188 * state).
2189 * @param pPage The guest page tracking structure.
2190 * @param GCPhys The page address.
2191 * @param pRam The ram range (logging).
2192 */
2193static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2194{
2195 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2196 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2197 return VERR_SSM_UNEXPECTED_DATA;
2198
2199 /* I think this should be sufficient. */
2200 if ( !PGM_PAGE_IS_ZERO(pPage)
2201 && !PGM_PAGE_IS_BALLOONED(pPage))
2202 return VERR_SSM_UNEXPECTED_DATA;
2203
2204 NOREF(pVM);
2205 NOREF(GCPhys);
2206 NOREF(pRam);
2207 return VINF_SUCCESS;
2208}
2209
2210
2211/**
2212 * Loads a page from the saved state.
2213 *
2214 * @returns VBox status code.
2215 * @param pVM The cross context VM structure.
2216 * @param pSSM The SSM handle.
2217 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2218 * state).
2219 * @param pPage The guest page tracking structure.
2220 * @param GCPhys The page address.
2221 * @param pRam The ram range (logging).
2222 */
2223static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2224{
2225 /*
2226 * Match up the type, dealing with MMIO2 aliases (dropped).
2227 */
2228 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2229 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2230 /* kudge for the expanded PXE bios (r67885) - @bugref{5687}: */
2231 || ( uOldType == PGMPAGETYPE_OLD_RAM
2232 && GCPhys >= 0xed000
2233 && GCPhys <= 0xeffff
2234 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2235 ,
2236 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2237 VERR_SSM_UNEXPECTED_DATA);
2238
2239 /*
2240 * Load the page.
2241 */
2242 PGMPAGEMAPLOCK PgMpLck;
2243 void *pvPage;
2244 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2245 if (RT_SUCCESS(rc))
2246 {
2247 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
2248 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2249 }
2250
2251 return rc;
2252}
2253
2254
2255/**
2256 * Loads a page (counter part to pgmR3SavePage).
2257 *
2258 * @returns VBox status code, fully bitched errors.
2259 * @param pVM The cross context VM structure.
2260 * @param pSSM The SSM handle.
2261 * @param uOldType The page type.
2262 * @param pPage The page.
2263 * @param GCPhys The page address.
2264 * @param pRam The RAM range (for error messages).
2265 */
2266static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2267{
2268 uint8_t uState;
2269 int rc = SSMR3GetU8(pSSM, &uState);
2270 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2271 if (uState == 0 /* zero */)
2272 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2273 else if (uState == 1)
2274 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2275 else
2276 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2277 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2278 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2279 rc);
2280 return VINF_SUCCESS;
2281}
2282
2283
/**
 * Loads a shadowed ROM page.
 *
 * @returns VBox status code, errors are fully bitched.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The saved state handle.
 * @param   pPage       The page.
 * @param   GCPhys      The page address.
 * @param   pRam        The RAM range (for error messages).
 */
static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /*
     * Load and set the protection first, then load the two pages, the first
     * one is the active the other is the passive.
     */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);

    uint8_t uProt;
    int rc = SSMR3GetU8(pSSM, &uProt);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
    PGMROMPROT enmProt = (PGMROMPROT)uProt;
    /* NOTE(review): the range check accepts PGMROMPROT_INVALID (>=), whereas
       pgmR3LoadMemory rejects it (>).  Looks like it should be '>' here too —
       confirm before changing, as it alters acceptance of old saved states. */
    AssertLogRelMsgReturn(    enmProt >= PGMROMPROT_INVALID
                          &&  enmProt <  PGMROMPROT_END,
                          ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    /* Apply the saved protection if it differs from the current one, and
       verify that PGMR3PhysRomProtect actually took effect. */
    if (pRomPage->enmProt != enmProt)
    {
        rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
    }

    /* Which of Virgin/Shadow is currently mapped (active) depends on the
       protection; the other one is the passive copy kept in PGMROMPAGE. */
    PPGMPAGE pPageActive  = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin      : &pRomPage->Shadow;
    PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow      : &pRomPage->Virgin;
    uint8_t  u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM        : PGMPAGETYPE_ROM_SHADOW;
    uint8_t  u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;

    /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
     *        used down the line (will the 2nd page will be written to the first
     *        one because of a false TLB hit since the TLB is using GCPhys and
     *        doesn't check the HCPhys of the desired page). */
    /* Load the active page into the RAM range page, mirror its tracking data
       into the active PGMROMPAGE slot, then load the passive page. */
    rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
    if (RT_SUCCESS(rc))
    {
        *pPageActive = *pPage;
        rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
    }
    return rc;
}
2336
2337/**
2338 * Ram range flags and bits for older versions of the saved state.
2339 *
2340 * @returns VBox status code.
2341 *
2342 * @param pVM The cross context VM structure.
2343 * @param pSSM The SSM handle.
2344 * @param uVersion The saved state version.
2345 */
2346static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2347{
2348 PPGM pPGM = &pVM->pgm.s;
2349
2350 /*
2351 * Ram range flags and bits.
2352 */
2353 uint32_t i = 0;
2354 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
2355 {
2356 /* Check the sequence number / separator. */
2357 uint32_t u32Sep;
2358 int rc = SSMR3GetU32(pSSM, &u32Sep);
2359 if (RT_FAILURE(rc))
2360 return rc;
2361 if (u32Sep == ~0U)
2362 break;
2363 if (u32Sep != i)
2364 {
2365 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2366 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2367 }
2368 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2369
2370 /* Get the range details. */
2371 RTGCPHYS GCPhys;
2372 SSMR3GetGCPhys(pSSM, &GCPhys);
2373 RTGCPHYS GCPhysLast;
2374 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2375 RTGCPHYS cb;
2376 SSMR3GetGCPhys(pSSM, &cb);
2377 uint8_t fHaveBits;
2378 rc = SSMR3GetU8(pSSM, &fHaveBits);
2379 if (RT_FAILURE(rc))
2380 return rc;
2381 if (fHaveBits & ~1)
2382 {
2383 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2384 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2385 }
2386 size_t cchDesc = 0;
2387 char szDesc[256];
2388 szDesc[0] = '\0';
2389 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2390 {
2391 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2392 if (RT_FAILURE(rc))
2393 return rc;
2394 /* Since we've modified the description strings in r45878, only compare
2395 them if the saved state is more recent. */
2396 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2397 cchDesc = strlen(szDesc);
2398 }
2399
2400 /*
2401 * Match it up with the current range.
2402 *
2403 * Note there is a hack for dealing with the high BIOS mapping
2404 * in the old saved state format, this means we might not have
2405 * a 1:1 match on success.
2406 */
2407 if ( ( GCPhys != pRam->GCPhys
2408 || GCPhysLast != pRam->GCPhysLast
2409 || cb != pRam->cb
2410 || ( cchDesc
2411 && strcmp(szDesc, pRam->pszDesc)) )
2412 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2413 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2414 || GCPhys != UINT32_C(0xfff80000)
2415 || GCPhysLast != UINT32_C(0xffffffff)
2416 || pRam->GCPhysLast != GCPhysLast
2417 || pRam->GCPhys < GCPhys
2418 || !fHaveBits)
2419 )
2420 {
2421 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2422 "State : %RGp-%RGp %RGp bytes %s %s\n",
2423 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2424 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2425 /*
2426 * If we're loading a state for debugging purpose, don't make a fuss if
2427 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2428 */
2429 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2430 || GCPhys < 8 * _1M)
2431 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2432 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2433 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2434 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
2435
2436 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2437 continue;
2438 }
2439
2440 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
2441 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2442 {
2443 /*
2444 * Load the pages one by one.
2445 */
2446 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2447 {
2448 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2449 PPGMPAGE pPage = &pRam->aPages[iPage];
2450 uint8_t uOldType;
2451 rc = SSMR3GetU8(pSSM, &uOldType);
2452 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2453 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2454 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2455 else
2456 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2457 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2458 }
2459 }
2460 else
2461 {
2462 /*
2463 * Old format.
2464 */
2465
2466 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2467 The rest is generally irrelevant and wrong since the stuff have to match registrations. */
2468 uint32_t fFlags = 0;
2469 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2470 {
2471 uint16_t u16Flags;
2472 rc = SSMR3GetU16(pSSM, &u16Flags);
2473 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2474 fFlags |= u16Flags;
2475 }
2476
2477 /* Load the bits */
2478 if ( !fHaveBits
2479 && GCPhysLast < UINT32_C(0xe0000000))
2480 {
2481 /*
2482 * Dynamic chunks.
2483 */
2484 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
2485 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2486 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2487 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2488
2489 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2490 {
2491 uint8_t fPresent;
2492 rc = SSMR3GetU8(pSSM, &fPresent);
2493 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2494 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2495 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2496 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2497
2498 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2499 {
2500 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2501 PPGMPAGE pPage = &pRam->aPages[iPage];
2502 if (fPresent)
2503 {
2504 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2505 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2506 rc = pgmR3LoadPageToDevNullOld(pSSM);
2507 else
2508 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2509 }
2510 else
2511 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2512 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2513 }
2514 }
2515 }
2516 else if (pRam->pvR3)
2517 {
2518 /*
2519 * MMIO2.
2520 */
2521 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2522 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2523 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2524 AssertLogRelMsgReturn(pRam->pvR3,
2525 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2526 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2527
2528 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2529 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2530 }
2531 else if (GCPhysLast < UINT32_C(0xfff80000))
2532 {
2533 /*
2534 * PCI MMIO, no pages saved.
2535 */
2536 }
2537 else
2538 {
2539 /*
2540 * Load the 0xfff80000..0xffffffff BIOS range.
2541 * It starts with X reserved pages that we have to skip over since
2542 * the RAMRANGE create by the new code won't include those.
2543 */
2544 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2545 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2546 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2547 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2548 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2549 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2550 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2551
2552 /* Skip wasted reserved pages before the ROM. */
2553 while (GCPhys < pRam->GCPhys)
2554 {
2555 rc = pgmR3LoadPageToDevNullOld(pSSM);
2556 GCPhys += PAGE_SIZE;
2557 }
2558
2559 /* Load the bios pages. */
2560 cPages = pRam->cb >> PAGE_SHIFT;
2561 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2562 {
2563 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2564 PPGMPAGE pPage = &pRam->aPages[iPage];
2565
2566 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2567 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2568 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2569 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2570 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2571 }
2572 }
2573 }
2574 }
2575
2576 return VINF_SUCCESS;
2577}
2578
2579
/**
 * Worker for pgmR3Load and pgmR3LoadLocked.
 *
 * Processes the page record stream (current saved state format) until the
 * PGM_STATE_REC_END terminator is reached.  Records without the
 * PGM_STATE_REC_FLAG_ADDR flag implicitly address the page following the
 * previous record; records with the flag carry an explicit address (RAM) or
 * range id + page index (MMIO2/ROM).
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pSSM                The SSM handle.
 * @param   uVersion            The PGM saved state unit version.
 * @param   uPass               The pass number.
 *
 * @todo    This needs splitting up if more record types or code twists are
 *          added...
 */
static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    NOREF(uPass);

    /*
     * Process page records until we hit the terminator.
     */
    /* Cross-iteration cursor state for the implicit-address optimization:
       GCPhys tracks the last RAM page, id/iPage the last MMIO2/ROM page. */
    RTGCPHYS            GCPhys   = NIL_RTGCPHYS;
    PPGMRAMRANGE        pRamHint = NULL;
    uint8_t             id       = UINT8_MAX;
    uint32_t            iPage    = UINT32_MAX - 10;
    PPGMROMRANGE        pRom     = NULL;
    PPGMREGMMIORANGE    pRegMmio = NULL;

    /*
     * We batch up pages that should be freed instead of calling GMM for
     * each and every one of them. Note that we'll lose the pages in most
     * failure paths - this should probably be addressed one day.
     */
    uint32_t            cPendingPages = 0;
    PGMMFREEPAGESREQ    pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    for (;;)
    {
        /*
         * Get the record type and flags.
         */
        uint8_t u8;
        rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
        {
            /*
             * Finish off any pages pending freeing.
             */
            if (cPendingPages)
            {
                Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
                rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
                AssertLogRelRCReturn(rc, rc);
            }
            GMMR3FreePagesCleanup(pReq);
            return VINF_SUCCESS;
        }
        AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
        {
            /*
             * RAM page.
             */
            case PGM_STATE_REC_RAM_ZERO:
            case PGM_STATE_REC_RAM_RAW:
            case PGM_STATE_REC_RAM_BALLOONED:
            {
                /*
                 * Get the address and resolve it into a page descriptor.
                 */
                /* No explicit address means "the page after the previous one". */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    GCPhys += PAGE_SIZE;
                else
                {
                    rc = SSMR3GetGCPhys(pSSM, &GCPhys);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                PPGMPAGE pPage;
                rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
                AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);

                /*
                 * Take action according to the record type.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_RAM_ZERO:
                    {
                        if (PGM_PAGE_IS_ZERO(pPage))
                            break;

                        /* Ballooned pages must be unmarked (live snapshot and
                           teleportation scenarios). */
                        if (PGM_PAGE_IS_BALLOONED(pPage))
                        {
                            Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
                            if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
                                break;
                            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
                            break;
                        }

                        AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);

                        /* If this is a ROM page, we must clear it and not try to
                         * free it.  Ditto if the VM is using RamPreAlloc (see
                         * @bugref{6318}). */
                        if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
                            || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
                            || pVM->pgm.s.fRamPreAlloc)
                        {
                            PGMPAGEMAPLOCK PgMpLck;
                            void          *pvDstPage;
                            rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
                            AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);

                            ASMMemZeroPage(pvDstPage);
                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        }
                        /* Free it only if it's not part of a previously
                           allocated large page (no need to clear the page). */
                        else if (   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
                                 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
                        {
                            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
                            AssertRCReturn(rc, rc);
                        }
                        /** @todo handle large pages (see @bugref{5545}) */
                        break;
                    }

                    case PGM_STATE_REC_RAM_BALLOONED:
                    {
                        Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
                        if (PGM_PAGE_IS_BALLOONED(pPage))
                            break;

                        /* We don't map ballooned pages in our shadow page tables, let's
                           just free it if allocated and mark as ballooned.  See @bugref{5515}. */
                        if (PGM_PAGE_IS_ALLOCATED(pPage))
                        {
                            /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
                             *        @bugref{5545}). */
                            AssertLogRelMsgReturn(   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
                                                  && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
                                                  ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);

                            rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
                            AssertRCReturn(rc, rc);
                        }
                        Assert(PGM_PAGE_IS_ZERO(pPage));
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
                        break;
                    }

                    case PGM_STATE_REC_RAM_RAW:
                    {
                        PGMPAGEMAPLOCK PgMpLck;
                        void          *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                    }

                    default:
                        AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
                }
                /* Invalidate the MMIO2/ROM cursor; GCPhys remains valid for
                   implicit addressing of the next RAM record. */
                id = UINT8_MAX;
                break;
            }

            /*
             * MMIO2 page.
             */
            case PGM_STATE_REC_MMIO2_RAW:
            case PGM_STATE_REC_MMIO2_ZERO:
            {
                /*
                 * Get the ID + page number and resolved that into a MMIO2 page.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                /* Look up the range by saved state id, reusing the cached one
                   when the id hasn't changed. */
                if (    !pRegMmio
                    ||  pRegMmio->idSavedState != id)
                {
                    for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
                        if (   pRegMmio->idSavedState == id
                            && (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2))
                            break;
                    AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
                }
                AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
                void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);

                /*
                 * Load the page bits.
                 */
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
                    ASMMemZeroPage(pvDstPage);
                else
                {
                    rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                /* Invalidate the RAM cursor; a following implicit RAM record
                   would otherwise continue from a stale address. */
                GCPhys = NIL_RTGCPHYS;
                break;
            }

            /*
             * ROM pages.
             */
            case PGM_STATE_REC_ROM_VIRGIN:
            case PGM_STATE_REC_ROM_SHW_RAW:
            case PGM_STATE_REC_ROM_SHW_ZERO:
            case PGM_STATE_REC_ROM_PROT:
            {
                /*
                 * Get the ID + page number and resolved that into a ROM page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                /* Look up the ROM range by saved state id, with caching. */
                if (    !pRom
                    ||  pRom->idSavedState != id)
                {
                    for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                        if (pRom->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
                }
                AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);

                /*
                 * Get and set the protection.
                 */
                uint8_t u8Prot;
                rc = SSMR3GetU8(pSSM, &u8Prot);
                if (RT_FAILURE(rc))
                    return rc;
                PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
                AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);

                if (enmProt != pRomPage->enmProt)
                {
                    if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
                        return SSMR3SetCfgError(pSSM, RT_SRC_POS,
                                                N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
                                                GCPhys, enmProt, pRom->pszDesc);
                    rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
                    AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                    AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
                }
                /* A PROT record carries no page data - done with this record. */
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
                    break; /* done */

                /*
                 * Get the right page descriptor.
                 */
                /* pRealPage is left NULL when the record targets the page that
                   is currently mapped; it is then resolved via the RAM range. */
                PPGMPAGE pRealPage;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_VIRGIN:
                        if (!PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Virgin;
                        else
                            pRealPage = NULL;
                        break;

                    case PGM_STATE_REC_ROM_SHW_RAW:
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
                            return SSMR3SetCfgError(pSSM, RT_SRC_POS,
                                                    N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
                                                    GCPhys, enmProt, pRom->pszDesc);
                        if (PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Shadow;
                        else
                            pRealPage = NULL;
                        break;

                    default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
                }
                if (!pRealPage)
                {
                    rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
                }

                /*
                 * Make it writable and map it (if necessary).
                 */
                /* pvDstPage stays NULL for a SHW_ZERO record hitting a page
                   that is already zero/ballooned - nothing to clear then. */
                void *pvDstPage = NULL;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (    PGM_PAGE_IS_ZERO(pRealPage)
                            ||  PGM_PAGE_IS_BALLOONED(pRealPage))
                            break;
                        /** @todo implement zero page replacing. */
                        RT_FALL_THRU();
                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                    {
                        rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                        break;
                    }
                }

                /*
                 * Load the bits.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (pvDstPage)
                            ASMMemZeroPage(pvDstPage);
                        break;

                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                }
                /* Invalidate the RAM cursor (see the MMIO2 case above). */
                GCPhys = NIL_RTGCPHYS;
                break;
            }

            /*
             * Unknown type.
             */
            default:
                AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
        }
    } /* forever */
}
2944
2945
2946/**
2947 * Worker for pgmR3Load.
2948 *
2949 * @returns VBox status code.
2950 *
2951 * @param pVM The cross context VM structure.
2952 * @param pSSM The SSM handle.
2953 * @param uVersion The saved state version.
2954 */
2955static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2956{
2957 PPGM pPGM = &pVM->pgm.s;
2958 int rc;
2959 uint32_t u32Sep;
2960
2961 /*
2962 * Load basic data (required / unaffected by relocation).
2963 */
2964 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2965 {
2966 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
2967 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2968 else
2969 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFieldsPreBalloon[0]);
2970
2971 AssertLogRelRCReturn(rc, rc);
2972
2973 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2974 {
2975 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
2976 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
2977 else
2978 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
2979 AssertLogRelRCReturn(rc, rc);
2980 }
2981 }
2982 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2983 {
2984 AssertRelease(pVM->cCpus == 1);
2985
2986 PGMOLD pgmOld;
2987 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2988 AssertLogRelRCReturn(rc, rc);
2989
2990 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
2991 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
2992 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
2993
2994 PVMCPU pVCpu0 = pVM->apCpusR3[0];
2995 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2996 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2997 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2998 }
2999 else
3000 {
3001 AssertRelease(pVM->cCpus == 1);
3002
3003 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
3004 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
3005 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
3006
3007 uint32_t cbRamSizeIgnored;
3008 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3009 if (RT_FAILURE(rc))
3010 return rc;
3011 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3012 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3013
3014 uint32_t u32 = 0;
3015 SSMR3GetUInt(pSSM, &u32);
3016 pVCpu0->pgm.s.fA20Enabled = !!u32;
3017 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3018 RTUINT uGuestMode;
3019 SSMR3GetUInt(pSSM, &uGuestMode);
3020 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3021
3022 /* check separator. */
3023 SSMR3GetU32(pSSM, &u32Sep);
3024 if (RT_FAILURE(rc))
3025 return rc;
3026 if (u32Sep != (uint32_t)~0)
3027 {
3028 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3029 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3030 }
3031 }
3032
3033 /*
3034 * Fix the A20 mask.
3035 */
3036 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3037 {
3038 PVMCPU pVCpu = pVM->apCpusR3[i];
3039 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3040 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3041 }
3042
3043 /*
3044 * The guest mappings - skipped now, see re-fixation in the caller.
3045 */
3046 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3047 {
3048 for (uint32_t i = 0; ; i++)
3049 {
3050 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3051 if (RT_FAILURE(rc))
3052 return rc;
3053 if (u32Sep == ~0U)
3054 break;
3055 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3056
3057 char szDesc[256];
3058 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3059 if (RT_FAILURE(rc))
3060 return rc;
3061 RTGCPTR GCPtrIgnore;
3062 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3063 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3064 if (RT_FAILURE(rc))
3065 return rc;
3066 }
3067 }
3068
3069 /*
3070 * Load the RAM contents.
3071 */
3072 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3073 {
3074 if (!pVM->pgm.s.LiveSave.fActive)
3075 {
3076 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3077 {
3078 rc = pgmR3LoadRamConfig(pVM, pSSM);
3079 if (RT_FAILURE(rc))
3080 return rc;
3081 }
3082 rc = pgmR3LoadRomRanges(pVM, pSSM);
3083 if (RT_FAILURE(rc))
3084 return rc;
3085 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3086 if (RT_FAILURE(rc))
3087 return rc;
3088 }
3089
3090 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3091 }
3092 else
3093 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3094
3095 /* Refresh balloon accounting. */
3096 if (pVM->pgm.s.cBalloonedPages)
3097 {
3098 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3099 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3100 AssertRCReturn(rc, rc);
3101 }
3102 return rc;
3103}
3104
3105
3106/**
3107 * @callback_method_impl{FNSSMINTLOADEXEC}
3108 */
3109static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3110{
3111 int rc;
3112
3113 /*
3114 * Validate version.
3115 */
3116 if ( ( uPass != SSM_PASS_FINAL
3117 && uVersion != PGM_SAVED_STATE_VERSION
3118 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3119 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3120 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3121 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3122 || ( uVersion != PGM_SAVED_STATE_VERSION
3123 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3124 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3125 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3126 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3127 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3128 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3129 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3130 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3131 )
3132 {
3133 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3134 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3135 }
3136
3137 /*
3138 * Do the loading while owning the lock because a bunch of the functions
3139 * we're using requires this.
3140 */
3141 if (uPass != SSM_PASS_FINAL)
3142 {
3143 pgmLock(pVM);
3144 if (uPass != 0)
3145 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3146 else
3147 {
3148 pVM->pgm.s.LiveSave.fActive = true;
3149 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3150 rc = pgmR3LoadRamConfig(pVM, pSSM);
3151 else
3152 rc = VINF_SUCCESS;
3153 if (RT_SUCCESS(rc))
3154 rc = pgmR3LoadRomRanges(pVM, pSSM);
3155 if (RT_SUCCESS(rc))
3156 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3157 if (RT_SUCCESS(rc))
3158 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3159 }
3160 pgmUnlock(pVM);
3161 }
3162 else
3163 {
3164 pgmLock(pVM);
3165 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3166 pVM->pgm.s.LiveSave.fActive = false;
3167 pgmUnlock(pVM);
3168 if (RT_SUCCESS(rc))
3169 {
3170 /*
3171 * We require a full resync now.
3172 */
3173 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3174 {
3175 PVMCPU pVCpu = pVM->apCpusR3[i];
3176 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3177 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3178 /** @todo For guest PAE, we might get the wrong
3179 * aGCPhysGstPaePDs values now. We should used the
3180 * saved ones... Postponing this since it nothing new
3181 * and PAE/PDPTR needs some general readjusting, see
3182 * @bugref{5880}. */
3183 }
3184
3185 pgmR3HandlerPhysicalUpdateAll(pVM);
3186
3187 /*
3188 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3189 * (Requires the CPUM state to be restored already!)
3190 */
3191 if (CPUMR3IsStateRestorePending(pVM))
3192 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3193 N_("PGM was unexpectedly restored before CPUM"));
3194
3195 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3196 {
3197 PVMCPU pVCpu = pVM->apCpusR3[i];
3198
3199 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
3200 AssertLogRelRCReturn(rc, rc);
3201
3202 /* Update the PSE, NX flags and validity masks. */
3203 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3204 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3205 }
3206
3207 /*
3208 * Try re-fixate the guest mappings.
3209 */
3210 pVM->pgm.s.fMappingsFixedRestored = false;
3211 if ( pVM->pgm.s.fMappingsFixed
3212 && pgmMapAreMappingsEnabled(pVM))
3213 {
3214#ifndef PGM_WITHOUT_MAPPINGS
3215 RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed;
3216 uint32_t cbFixed = pVM->pgm.s.cbMappingFixed;
3217 pVM->pgm.s.fMappingsFixed = false;
3218
3219 uint32_t cbRequired;
3220 int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);
3221 if ( RT_SUCCESS(rc2)
3222 && cbRequired > cbFixed)
3223 rc2 = VERR_OUT_OF_RANGE;
3224 if (RT_SUCCESS(rc2))
3225 rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);
3226 if (RT_FAILURE(rc2))
3227 {
3228 LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",
3229 GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));
3230 pVM->pgm.s.fMappingsFixed = false;
3231 pVM->pgm.s.fMappingsFixedRestored = true;
3232 pVM->pgm.s.GCPtrMappingFixed = GCPtrFixed;
3233 pVM->pgm.s.cbMappingFixed = cbFixed;
3234 }
3235#else
3236 AssertFailed();
3237#endif
3238 }
3239 else
3240 {
3241 /* We used to set fixed + disabled while we only use disabled now,
3242 so wipe the state to avoid any confusion. */
3243 pVM->pgm.s.fMappingsFixed = false;
3244 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;
3245 pVM->pgm.s.cbMappingFixed = 0;
3246 }
3247
3248 /*
3249 * If we have floating mappings, do a CR3 sync now to make sure the HMA
3250 * doesn't conflict with guest code / data and thereby cause trouble
3251 * when restoring other components like PATM.
3252 */
3253 if (pgmMapAreMappingsFloating(pVM))
3254 {
3255 PVMCPU pVCpu = pVM->apCpusR3[0];
3256 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
3257 if (RT_FAILURE(rc))
3258 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3259 N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);
3260
3261 /* Make sure to re-sync before executing code. */
3262 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3263 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3264 }
3265 }
3266 }
3267
3268 return rc;
3269}
3270
3271
3272/**
3273 * @callback_method_impl{FNSSMINTLOADDONE}
3274 */
3275static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3276{
3277 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3278 NOREF(pSSM);
3279 return VINF_SUCCESS;
3280}
3281
3282
3283/**
3284 * Registers the saved state callbacks with SSM.
3285 *
3286 * @returns VBox status code.
3287 * @param pVM The cross context VM structure.
3288 * @param cbRam The RAM size.
3289 */
3290int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3291{
3292 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3293 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3294 NULL, pgmR3SaveExec, pgmR3SaveDone,
3295 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3296}
3297
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette