VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@80191

Last change on this file was 80191, checked in by vboxsync, 5 years ago

VMM/r3: Refactored VMCPU enumeration in preparation that aCpus will be replaced with a pointer array. Removed two raw-mode offset members from the CPUM and CPUMCPU sub-structures. bugref:9217 bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 127.0 KB
1/* $Id: PGMSavedState.cpp 80191 2019-08-08 00:36:57Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_PGM
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/ssm.h>
27#include <VBox/vmm/pdmdrv.h>
28#include <VBox/vmm/pdmdev.h>
29#include "PGMInternal.h"
30#include <VBox/vmm/vm.h>
31#include "PGMInline.h"
32
33#include <VBox/param.h>
34#include <VBox/err.h>
35
36#include <iprt/asm.h>
37#include <iprt/assert.h>
38#include <iprt/crc.h>
39#include <iprt/mem.h>
40#include <iprt/sha.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44
45/*********************************************************************************************************************************
46* Defined Constants And Macros *
47*********************************************************************************************************************************/
48/** Saved state data unit version. */
49#define PGM_SAVED_STATE_VERSION 14
50/** Saved state data unit version before the PAE PDPE registers. */
51#define PGM_SAVED_STATE_VERSION_PRE_PAE 13
52/** Saved state data unit version after this includes ballooned page flags in
53 * the state (see @bugref{5515}). */
54#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
55/** Saved state before the balloon change. */
56#define PGM_SAVED_STATE_VERSION_PRE_BALLOON 11
57/** Saved state data unit version used during 3.1 development, misses the RAM
58 * config. */
59#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG 10
60/** Saved state data unit version for 3.0 (pre teleportation). */
61#define PGM_SAVED_STATE_VERSION_3_0_0 9
62/** Saved state data unit version for 2.2.2 and later. */
63#define PGM_SAVED_STATE_VERSION_2_2_2 8
64/** Saved state data unit version for 2.2.0. */
65#define PGM_SAVED_STATE_VERSION_RR_DESC 7
66/** Saved state data unit version. */
67#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
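/*
 * Illustrative sketch (not part of the original file): a load callback
 * typically gates format differences on the version constants above.  The
 * helper name below is hypothetical; the real dispatching is done by the
 * PGM load code further down in the file (outside this excerpt).
 */
#if 0 /* example only */
static bool pgmR3ExampleIsVersionKnown(uint32_t uVersion)
{
    switch (uVersion)
    {
        case PGM_SAVED_STATE_VERSION:
        case PGM_SAVED_STATE_VERSION_PRE_PAE:
        case PGM_SAVED_STATE_VERSION_BALLOON_BROKEN:
        case PGM_SAVED_STATE_VERSION_PRE_BALLOON:
        case PGM_SAVED_STATE_VERSION_NO_RAM_CFG:
        case PGM_SAVED_STATE_VERSION_3_0_0:
        case PGM_SAVED_STATE_VERSION_2_2_2:
        case PGM_SAVED_STATE_VERSION_RR_DESC:
        case PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE:
            return true;
        default:
            return false;
    }
}
#endif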
68
69
70/** @name Sparse state record types
71 * @{ */
72/** Zero page. No data. */
73#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
74/** Raw page. */
75#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
76/** Raw MMIO2 page. */
77#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
78/** Zero MMIO2 page. */
79#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
80/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
81#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
82/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
83#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
84/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
85#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
86/** ROM protection (8-bit). */
87#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
88/** Ballooned page. No data. */
89#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
90/** The last record type. */
91#define PGM_STATE_REC_LAST PGM_STATE_REC_RAM_BALLOONED
92/** End marker. */
93#define PGM_STATE_REC_END UINT8_C(0xff)
94/** Flag indicating that the data is preceded by the page address.
95 * For RAW pages this is an RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
96 * range ID and a 32-bit page index.
97 */
98#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
99/** @} */
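/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * every record starts with a type byte, optionally OR'ed with
 * PGM_STATE_REC_FLAG_ADDR.  With the flag set, RAM records are followed by
 * an RTGCPHYS while MMIO2/ROM records carry an 8-bit range ID plus a 32-bit
 * page index; without it, a record implicitly continues at the page after
 * the previous one of the same kind.
 */
#if 0 /* example only */
static int pgmR3ExampleReadRecHdr(PSSMHANDLE pSSM, uint8_t *puType,
                                  PRTGCPHYS pGCPhys, uint8_t *pidRange, uint32_t *piPage)
{
    uint8_t u8Rec;
    int rc = SSMR3GetU8(pSSM, &u8Rec);
    if (RT_FAILURE(rc))
        return rc;
    if (u8Rec == PGM_STATE_REC_END)
    {
        *puType = PGM_STATE_REC_END;
        return VINF_SUCCESS;
    }
    *puType = u8Rec & ~PGM_STATE_REC_FLAG_ADDR;
    AssertReturn(*puType <= PGM_STATE_REC_LAST, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
    if (u8Rec & PGM_STATE_REC_FLAG_ADDR)
    {
        if (   *puType == PGM_STATE_REC_RAM_ZERO
            || *puType == PGM_STATE_REC_RAM_RAW
            || *puType == PGM_STATE_REC_RAM_BALLOONED)
            rc = SSMR3GetGCPhys(pSSM, pGCPhys);  /* RAM: full guest physical address */
        else
        {
            SSMR3GetU8(pSSM, pidRange);          /* MMIO2/ROM: range ID ... */
            rc = SSMR3GetU32(pSSM, piPage);      /* ... plus page index within it */
        }
    }
    return rc;
}
#endif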
100
101/** The CRC-32 for a zero page. */
102#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
103/** The CRC-32 for a zero half page. */
104#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
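/*
 * These two constants can be reproduced as sketched below (assuming 4 KiB
 * pages, i.e. PAGE_SIZE == 4096, and the standard IPRT CRC-32), e.g. in a
 * unit test:
 */
#if 0 /* example only */
uint8_t abZero[PAGE_SIZE];
RT_ZERO(abZero);
Assert(RTCrc32(abZero, PAGE_SIZE)     == PGM_STATE_CRC32_ZERO_PAGE);      /* 4096 zero bytes */
Assert(RTCrc32(abZero, PAGE_SIZE / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE); /* 2048 zero bytes */
#endif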
105
106
107
108/** @name Old Page types used in older saved states.
109 * @{ */
110/** Old saved state: The usual invalid zero entry. */
111#define PGMPAGETYPE_OLD_INVALID 0
112/** Old saved state: RAM page. (RWX) */
113#define PGMPAGETYPE_OLD_RAM 1
114/** Old saved state: MMIO2 page. (RWX) */
115#define PGMPAGETYPE_OLD_MMIO2 1
116/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
117 * See PGMHandlerPhysicalPageAlias(). */
118#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO 2
119/** Old saved state: Shadowed ROM. (RWX) */
120#define PGMPAGETYPE_OLD_ROM_SHADOW 3
121/** Old saved state: ROM page. (R-X) */
122#define PGMPAGETYPE_OLD_ROM 4
123/** Old saved state: MMIO page. (---) */
124#define PGMPAGETYPE_OLD_MMIO 5
125/** @} */
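/*
 * Sketch (hypothetical helper, not part of the original file) of how the old
 * values map onto the current PGMPAGETYPE enumeration.  Note that
 * PGMPAGETYPE_OLD_MMIO2 shares the value 1 with PGMPAGETYPE_OLD_RAM above,
 * so the two cannot be told apart by value alone in the old format.
 */
#if 0 /* example only */
static PGMPAGETYPE pgmR3ExampleOldPageTypeToNew(uint8_t uOldType)
{
    switch (uOldType)
    {
        case PGMPAGETYPE_OLD_RAM:              return PGMPAGETYPE_RAM; /* also PGMPAGETYPE_OLD_MMIO2 */
        case PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO: return PGMPAGETYPE_MMIO2_ALIAS_MMIO;
        case PGMPAGETYPE_OLD_ROM_SHADOW:       return PGMPAGETYPE_ROM_SHADOW;
        case PGMPAGETYPE_OLD_ROM:              return PGMPAGETYPE_ROM;
        case PGMPAGETYPE_OLD_MMIO:             return PGMPAGETYPE_MMIO;
        default:                               return PGMPAGETYPE_INVALID;
    }
}
#endif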
126
127
128/*********************************************************************************************************************************
129* Structures and Typedefs *
130*********************************************************************************************************************************/
131/** For loading old saved states. (pre-smp) */
132typedef struct
133{
134 /** If set no conflict checks are required. (boolean) */
135 bool fMappingsFixed;
136 /** Size of fixed mapping */
137 uint32_t cbMappingFixed;
138 /** Base address (GC) of fixed mapping */
139 RTGCPTR GCPtrMappingFixed;
140 /** A20 gate mask.
141 * Our current approach to A20 emulation is to let REM do it and don't bother
142 * anywhere else. The interesting guests will be operating with it enabled anyway.
143 * But should the need arise, we'll subject physical addresses to this mask. */
144 RTGCPHYS GCPhysA20Mask;
145 /** A20 gate state - boolean! */
146 bool fA20Enabled;
147 /** The guest paging mode. */
148 PGMMODE enmGuestMode;
149} PGMOLD;
150
151
152/*********************************************************************************************************************************
153* Global Variables *
154*********************************************************************************************************************************/
155/** PGM fields to save/load. */
156
157static const SSMFIELD s_aPGMFields[] =
158{
159 SSMFIELD_ENTRY( PGM, fMappingsFixed),
160 SSMFIELD_ENTRY_GCPTR( PGM, GCPtrMappingFixed),
161 SSMFIELD_ENTRY( PGM, cbMappingFixed),
162 SSMFIELD_ENTRY( PGM, cBalloonedPages),
163 SSMFIELD_ENTRY_TERM()
164};
165
166static const SSMFIELD s_aPGMFieldsPreBalloon[] =
167{
168 SSMFIELD_ENTRY( PGM, fMappingsFixed),
169 SSMFIELD_ENTRY_GCPTR( PGM, GCPtrMappingFixed),
170 SSMFIELD_ENTRY( PGM, cbMappingFixed),
171 SSMFIELD_ENTRY_TERM()
172};
173
174static const SSMFIELD s_aPGMCpuFields[] =
175{
176 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
177 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
178 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
179 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[0]),
180 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[1]),
181 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[2]),
182 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[3]),
183 SSMFIELD_ENTRY_TERM()
184};
185
186static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
187{
188 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
189 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
190 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
191 SSMFIELD_ENTRY_TERM()
192};
193
194static const SSMFIELD s_aPGMFields_Old[] =
195{
196 SSMFIELD_ENTRY( PGMOLD, fMappingsFixed),
197 SSMFIELD_ENTRY_GCPTR( PGMOLD, GCPtrMappingFixed),
198 SSMFIELD_ENTRY( PGMOLD, cbMappingFixed),
199 SSMFIELD_ENTRY( PGMOLD, fA20Enabled),
200 SSMFIELD_ENTRY_GCPHYS( PGMOLD, GCPhysA20Mask),
201 SSMFIELD_ENTRY( PGMOLD, enmGuestMode),
202 SSMFIELD_ENTRY_TERM()
203};
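/*
 * A field table like the ones above drives SSM's structure (de)serializer:
 * each SSMFIELD_ENTRY* macro names one member to store, in table order, and
 * SSMFIELD_ENTRY_TERM() ends the list.  A minimal usage sketch (inside a
 * save/load callback; the file's real code hands these tables to the SSM
 * struct put/get routines):
 */
#if 0 /* example only */
int rcSave = SSMR3PutStruct(pSSM, &pVM->pgm.s, &s_aPGMFields[0]);   /* when saving */
int rcLoad = SSMR3GetStruct(pSSM, &pVM->pgm.s, &s_aPGMFields[0]);   /* when loading */
#endif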
204
205
206/**
207 * Find the ROM tracking structure for the given page.
208 *
209 * @returns Pointer to the ROM page structure.  NULL if the address is not
210 * part of any ROM range (i.e. the caller didn't check that it's a ROM page).
211 * @param pVM The cross context VM structure.
212 * @param GCPhys The address of the ROM page.
213 */
214static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
215{
216 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
217 pRomRange;
218 pRomRange = pRomRange->CTX_SUFF(pNext))
219 {
220 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
221 if (off < pRomRange->cb)
222 return &pRomRange->aPages[off >> PAGE_SHIFT];
223 }
224 return NULL;
225}
226
227
228/**
229 * Prepares the ROM pages for a live save.
230 *
231 * @returns VBox status code.
232 * @param pVM The cross context VM structure.
233 */
234static int pgmR3PrepRomPages(PVM pVM)
235{
236 /*
237 * Initialize the live save tracking in the ROM page descriptors.
238 */
239 pgmLock(pVM);
240 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
241 {
242 PPGMRAMRANGE pRamHint = NULL;
243 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
244
245 for (uint32_t iPage = 0; iPage < cPages; iPage++)
246 {
247 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
248 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
249 pRom->aPages[iPage].LiveSave.fDirty = true;
250 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
251 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
252 {
253 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
254 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
255 else
256 {
257 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
258 PPGMPAGE pPage;
259 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
260 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
261 if (RT_SUCCESS(rc))
262 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
263 else
264 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
265 }
266 }
267 }
268
269 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
270 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
271 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
272 }
273 pgmUnlock(pVM);
274
275 return VINF_SUCCESS;
276}
277
278
279/**
280 * Assigns IDs to the ROM ranges and saves them.
281 *
282 * @returns VBox status code.
283 * @param pVM The cross context VM structure.
284 * @param pSSM Saved state handle.
285 */
286static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
287{
288 pgmLock(pVM);
289 uint8_t id = 1;
290 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
291 {
292 pRom->idSavedState = id;
293 SSMR3PutU8(pSSM, id);
294 SSMR3PutStrZ(pSSM, ""); /* device name */
295 SSMR3PutU32(pSSM, 0); /* device instance */
296 SSMR3PutU8(pSSM, 0); /* region */
297 SSMR3PutStrZ(pSSM, pRom->pszDesc);
298 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
299 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
300 if (RT_FAILURE(rc))
301 break;
302 }
303 pgmUnlock(pVM);
304 return SSMR3PutU8(pSSM, UINT8_MAX);
305}
306
307
308/**
309 * Loads the ROM range ID assignments.
310 *
311 * @returns VBox status code.
312 *
313 * @param pVM The cross context VM structure.
314 * @param pSSM The saved state handle.
315 */
316static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
317{
318 PGM_LOCK_ASSERT_OWNER(pVM);
319
320 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
321 pRom->idSavedState = UINT8_MAX;
322
323 for (;;)
324 {
325 /*
326 * Read the data.
327 */
328 uint8_t id;
329 int rc = SSMR3GetU8(pSSM, &id);
330 if (RT_FAILURE(rc))
331 return rc;
332 if (id == UINT8_MAX)
333 {
334 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
335 AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
336 ("The \"%s\" ROM was not found in the saved state. Probably due to some misconfiguration\n",
337 pRom->pszDesc));
338 return VINF_SUCCESS; /* the end */
339 }
340 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
341
342 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
343 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
344 AssertLogRelRCReturn(rc, rc);
345
346 uint32_t uInstance;
347 SSMR3GetU32(pSSM, &uInstance);
348 uint8_t iRegion;
349 SSMR3GetU8(pSSM, &iRegion);
350
351 char szDesc[64];
352 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
353 AssertLogRelRCReturn(rc, rc);
354
355 RTGCPHYS GCPhys;
356 SSMR3GetGCPhys(pSSM, &GCPhys);
357 RTGCPHYS cb;
358 rc = SSMR3GetGCPhys(pSSM, &cb);
359 if (RT_FAILURE(rc))
360 return rc;
361 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
362 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
363
364 /*
365 * Locate a matching ROM range.
366 */
367 AssertLogRelMsgReturn( uInstance == 0
368 && iRegion == 0
369 && szDevName[0] == '\0',
370 ("GCPhys=%RGp %s\n", GCPhys, szDesc),
371 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
372 PPGMROMRANGE pRom;
373 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
374 {
375 if ( pRom->idSavedState == UINT8_MAX
376 && !strcmp(pRom->pszDesc, szDesc))
377 {
378 pRom->idSavedState = id;
379 break;
380 }
381 }
382 if (!pRom)
383 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
384 } /* forever */
385}
386
387
388/**
389 * Scan ROM pages.
390 *
391 * @param pVM The cross context VM structure.
392 */
393static void pgmR3ScanRomPages(PVM pVM)
394{
395 /*
396 * The shadow ROMs.
397 */
398 pgmLock(pVM);
399 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
400 {
401 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
402 {
403 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
404 for (uint32_t iPage = 0; iPage < cPages; iPage++)
405 {
406 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
407 if (pRomPage->LiveSave.fWrittenTo)
408 {
409 pRomPage->LiveSave.fWrittenTo = false;
410 if (!pRomPage->LiveSave.fDirty)
411 {
412 pRomPage->LiveSave.fDirty = true;
413 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
414 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
415 }
416 pRomPage->LiveSave.fDirtiedRecently = true;
417 }
418 else
419 pRomPage->LiveSave.fDirtiedRecently = false;
420 }
421 }
422 }
423 pgmUnlock(pVM);
424}
425
426
427/**
428 * Takes care of the virgin ROM pages in the first pass.
429 *
430 * This is an attempt at simplifying the handling of ROM pages a little bit.
431 * This ASSUMES that no new ROM ranges will be added and that they won't be
432 * relinked in any way.
433 *
434 * @returns VBox status code.
435 * @param pSSM The SSM handle.
436 * @param fLiveSave Whether we're in a live save or not.
437 */
438static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
439{
440 pgmLock(pVM);
441 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
442 {
443 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
444 for (uint32_t iPage = 0; iPage < cPages; iPage++)
445 {
446 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
447 PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;
448
449 /* Get the virgin page descriptor. */
450 PPGMPAGE pPage;
451 if (PGMROMPROT_IS_ROM(enmProt))
452 pPage = pgmPhysGetPage(pVM, GCPhys);
453 else
454 pPage = &pRom->aPages[iPage].Virgin;
455
456 /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
457 int rc = VINF_SUCCESS;
458 char abPage[PAGE_SIZE];
459 if ( !PGM_PAGE_IS_ZERO(pPage)
460 && !PGM_PAGE_IS_BALLOONED(pPage))
461 {
462 void const *pvPage;
463 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
464 if (RT_SUCCESS(rc))
465 memcpy(abPage, pvPage, PAGE_SIZE);
466 }
467 else
468 ASMMemZeroPage(abPage);
469 pgmUnlock(pVM);
470 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
471
472 /* Save it. */
473 if (iPage > 0)
474 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
475 else
476 {
477 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
478 SSMR3PutU8(pSSM, pRom->idSavedState);
479 SSMR3PutU32(pSSM, iPage);
480 }
481 SSMR3PutU8(pSSM, (uint8_t)enmProt);
482 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
483 if (RT_FAILURE(rc))
484 return rc;
485
486 /* Update state. */
487 pgmLock(pVM);
488 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
489 if (fLiveSave)
490 {
491 pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
492 pVM->pgm.s.LiveSave.Rom.cReadyPages++;
493 pVM->pgm.s.LiveSave.cSavedPages++;
494 }
495 }
496 }
497 pgmUnlock(pVM);
498 return VINF_SUCCESS;
499}
500
501
502/**
503 * Saves dirty pages in the shadowed ROM ranges.
504 *
505 * Used by pgmR3LiveExec and pgmR3SaveExec.
506 *
507 * @returns VBox status code.
508 * @param pVM The cross context VM structure.
509 * @param pSSM The SSM handle.
510 * @param fLiveSave Whether it's a live save or not.
511 * @param fFinalPass Whether this is the final pass or not.
512 */
513static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
514{
515 /*
516 * The Shadowed ROMs.
517 *
518 * ASSUMES that the ROM ranges are fixed.
519 * ASSUMES that all the ROM ranges are mapped.
520 */
521 pgmLock(pVM);
522 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
523 {
524 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
525 {
526 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
527 uint32_t iPrevPage = cPages;
528 for (uint32_t iPage = 0; iPage < cPages; iPage++)
529 {
530 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
531 if ( !fLiveSave
532 || ( pRomPage->LiveSave.fDirty
533 && ( ( !pRomPage->LiveSave.fDirtiedRecently
534 && !pRomPage->LiveSave.fWrittenTo)
535 || fFinalPass
536 )
537 )
538 )
539 {
540 uint8_t abPage[PAGE_SIZE];
541 PGMROMPROT enmProt = pRomPage->enmProt;
542 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
543 PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
544 bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
545 int rc = VINF_SUCCESS;
546 if (!fZero)
547 {
548 void const *pvPage;
549 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
550 if (RT_SUCCESS(rc))
551 memcpy(abPage, pvPage, PAGE_SIZE);
552 }
553 if (fLiveSave && RT_SUCCESS(rc))
554 {
555 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
556 pRomPage->LiveSave.fDirty = false;
557 pVM->pgm.s.LiveSave.Rom.cReadyPages++;
558 pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
559 pVM->pgm.s.LiveSave.cSavedPages++;
560 }
561 pgmUnlock(pVM);
562 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
563
564 if (iPage - 1U == iPrevPage && iPage > 0)
565 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
566 else
567 {
568 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
569 SSMR3PutU8(pSSM, pRom->idSavedState);
570 SSMR3PutU32(pSSM, iPage);
571 }
572 rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
573 if (!fZero)
574 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
575 if (RT_FAILURE(rc))
576 return rc;
577
578 pgmLock(pVM);
579 iPrevPage = iPage;
580 }
581 /*
582 * In the final pass, make sure the protection is in sync.
583 */
584 else if ( fFinalPass
585 && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
586 {
587 PGMROMPROT enmProt = pRomPage->enmProt;
588 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
589 pgmUnlock(pVM);
590
591 if (iPage - 1U == iPrevPage && iPage > 0)
592 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
593 else
594 {
595 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
596 SSMR3PutU8(pSSM, pRom->idSavedState);
597 SSMR3PutU32(pSSM, iPage);
598 }
599 int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
600 if (RT_FAILURE(rc))
601 return rc;
602
603 pgmLock(pVM);
604 iPrevPage = iPage;
605 }
606 }
607 }
608 }
609 pgmUnlock(pVM);
610 return VINF_SUCCESS;
611}
612
613
614/**
615 * Cleans up ROM pages after a live save.
616 *
617 * @param pVM The cross context VM structure.
618 */
619static void pgmR3DoneRomPages(PVM pVM)
620{
621 NOREF(pVM);
622}
623
624
625/**
626 * Prepares the MMIO2 pages for a live save.
627 *
628 * @returns VBox status code.
629 * @param pVM The cross context VM structure.
630 */
631static int pgmR3PrepMmio2Pages(PVM pVM)
632{
633 /*
634 * Initialize the live save tracking in the MMIO2 ranges.
635 * ASSUME nothing changes here.
636 */
637 pgmLock(pVM);
638 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
639 {
640 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
641 {
642 uint32_t const cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
643 pgmUnlock(pVM);
644
645 PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
646 if (!paLSPages)
647 return VERR_NO_MEMORY;
648 for (uint32_t iPage = 0; iPage < cPages; iPage++)
649 {
650 /* Initialize it as a dirty zero page. */
651 paLSPages[iPage].fDirty = true;
652 paLSPages[iPage].cUnchangedScans = 0;
653 paLSPages[iPage].fZero = true;
654 paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
655 paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
656 }
657
658 pgmLock(pVM);
659 pRegMmio->paLSPages = paLSPages;
660 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
661 }
662 }
663 pgmUnlock(pVM);
664 return VINF_SUCCESS;
665}
666
667
668/**
669 * Assigns IDs to the MMIO2 ranges and saves them.
670 *
671 * @returns VBox status code.
672 * @param pVM The cross context VM structure.
673 * @param pSSM Saved state handle.
674 */
675static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
676{
677 pgmLock(pVM);
678 uint8_t id = 1;
679 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
680 {
681 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
682 {
683 pRegMmio->idSavedState = id;
684 SSMR3PutU8(pSSM, id);
685 SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
686 SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
687 SSMR3PutU8(pSSM, pRegMmio->iRegion);
688 SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
689 int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
690 if (RT_FAILURE(rc))
691 break;
692 id++;
693 }
694 }
695 pgmUnlock(pVM);
696 return SSMR3PutU8(pSSM, UINT8_MAX);
697}
698
699
700/**
701 * Loads the MMIO2 range ID assignments.
702 *
703 * @returns VBox status code.
704 *
705 * @param pVM The cross context VM structure.
706 * @param pSSM The saved state handle.
707 */
708static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
709{
710 PGM_LOCK_ASSERT_OWNER(pVM);
711
712 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
713 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
714 pRegMmio->idSavedState = UINT8_MAX;
715
716 for (;;)
717 {
718 /*
719 * Read the data.
720 */
721 uint8_t id;
722 int rc = SSMR3GetU8(pSSM, &id);
723 if (RT_FAILURE(rc))
724 return rc;
725 if (id == UINT8_MAX)
726 {
727 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
728 AssertLogRelMsg( pRegMmio->idSavedState != UINT8_MAX
729 || !(pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2),
730 ("%s\n", pRegMmio->RamRange.pszDesc));
731 return VINF_SUCCESS; /* the end */
732 }
733 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
734
735 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
736 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
737 AssertLogRelRCReturn(rc, rc);
738
739 uint32_t uInstance;
740 SSMR3GetU32(pSSM, &uInstance);
741 uint8_t iRegion;
742 SSMR3GetU8(pSSM, &iRegion);
743
744 char szDesc[64];
745 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
746 AssertLogRelRCReturn(rc, rc);
747
748 RTGCPHYS cb;
749 rc = SSMR3GetGCPhys(pSSM, &cb);
750 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
751
752 /*
753 * Locate a matching MMIO2 range.
754 */
755 PPGMREGMMIORANGE pRegMmio;
756 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
757 {
758 if ( pRegMmio->idSavedState == UINT8_MAX
759 && pRegMmio->iRegion == iRegion
760 && pRegMmio->pDevInsR3->iInstance == uInstance
761 && (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
762 && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
763 {
764 pRegMmio->idSavedState = id;
765 break;
766 }
767 }
768 if (!pRegMmio)
769 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
770 szDesc, szDevName, uInstance, iRegion);
771
772 /*
773 * Validate the configuration, the size of the MMIO2 region should be
774 * the same.
775 */
776 if (cb != pRegMmio->RamRange.cb)
777 {
778 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
779 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
780 if (cb > pRegMmio->RamRange.cb) /* bad idea? */
781 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
782 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
783 }
784 } /* forever */
785}
786
787
788/**
789 * Scans one MMIO2 page.
790 *
791 * @returns True if changed, false if unchanged.
792 *
793 * @param pVM The cross context VM structure.
794 * @param pbPage The page bits.
795 * @param pLSPage The live save tracking structure for the page.
796 *
797 */
798DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
799{
800 /*
801 * Special handling of zero pages.
802 */
803 bool const fZero = pLSPage->fZero;
804 if (fZero)
805 {
806 if (ASMMemIsZeroPage(pbPage))
807 {
808 /* Not modified. */
809 if (pLSPage->fDirty)
810 pLSPage->cUnchangedScans++;
811 return false;
812 }
813
814 pLSPage->fZero = false;
815 pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
816 }
817 else
818 {
819 /*
820 * CRC the first half; if it doesn't match, the page is dirty and
821 * we won't check the 2nd half (we'll do that next time).
822 */
823 uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
824 if (u32CrcH1 == pLSPage->u32CrcH1)
825 {
826 uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
827 if (u32CrcH2 == pLSPage->u32CrcH2)
828 {
829 /* Probably not modified. */
830 if (pLSPage->fDirty)
831 pLSPage->cUnchangedScans++;
832 return false;
833 }
834
835 pLSPage->u32CrcH2 = u32CrcH2;
836 }
837 else
838 {
839 pLSPage->u32CrcH1 = u32CrcH1;
840 if ( u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
841 && ASMMemIsZeroPage(pbPage))
842 {
843 pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
844 pLSPage->fZero = true;
845 }
846 }
847 }
848
849 /* dirty page path */
850 pLSPage->cUnchangedScans = 0;
851 if (!pLSPage->fDirty)
852 {
853 pLSPage->fDirty = true;
854 pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
855 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
856 if (fZero)
857 pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
858 }
859 return true;
860}
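/*
 * Why two half-page CRCs: a page dirtied in its first half is detected after
 * hashing only 2 KiB, and the second half isn't hashed again until the next
 * scan.  Matching CRCs only mean "probably unchanged" (a stale match has
 * collision odds on the order of 2^-32 per half).  Given pbPage and pLSPage
 * as above, the fast path roughly amounts to:
 */
#if 0 /* example only */
uint32_t const u32H1 = RTCrc32(pbPage, PAGE_SIZE / 2);
if (u32H1 != pLSPage->u32CrcH1)
    pLSPage->u32CrcH1 = u32H1;      /* dirty; the 2nd half is skipped this scan */
else if (RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2) == pLSPage->u32CrcH2)
    pLSPage->cUnchangedScans++;     /* probably unchanged */
#endif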
861
862
863/**
864 * Scan for MMIO2 page modifications.
865 *
866 * @param pVM The cross context VM structure.
867 * @param uPass The pass number.
868 */
869static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
870{
871 /*
872 * Since this is a bit expensive we lower the scan rate after a little while.
873 */
874 if ( ( (uPass & 3) != 0
875 && uPass > 10)
876 || uPass == SSM_PASS_FINAL)
877 return;
878
879 pgmLock(pVM); /* paranoia */
880 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
881 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
882 {
883 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
884 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
885 pgmUnlock(pVM);
886
887 for (uint32_t iPage = 0; iPage < cPages; iPage++)
888 {
889 uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * PAGE_SIZE;
890 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
891 }
892
893 pgmLock(pVM);
894 }
895 pgmUnlock(pVM);
896
897}
898
899
900/**
901 * Save quiescent MMIO2 pages.
902 *
903 * @returns VBox status code.
904 * @param pVM The cross context VM structure.
905 * @param pSSM The SSM handle.
906 * @param fLiveSave Whether it's a live save or not.
907 * @param uPass The pass number.
908 */
909static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
910{
911 /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
912 * device that we wish to know about changes.) */
913
914 int rc = VINF_SUCCESS;
915 if (uPass == SSM_PASS_FINAL)
916 {
917 /*
918 * The mop up round.
919 */
920 pgmLock(pVM);
921 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
922 pRegMmio && RT_SUCCESS(rc);
923 pRegMmio = pRegMmio->pNextR3)
924 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
925 {
926 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
927 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
928 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
929 uint32_t iPageLast = cPages;
930 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
931 {
932 uint8_t u8Type;
933 if (!fLiveSave)
934 u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
935 else
936 {
937 /* Try to figure out if it's a clean page; compare the SHA-1 to be really sure. */
938 if ( !paLSPages[iPage].fDirty
939 && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
940 {
941 if (paLSPages[iPage].fZero)
942 continue;
943
944 uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
945 RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
946 if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
947 continue;
948 }
949 u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
950 pVM->pgm.s.LiveSave.cSavedPages++;
951 }
952
953 if (iPage != 0 && iPage == iPageLast + 1)
954 rc = SSMR3PutU8(pSSM, u8Type);
955 else
956 {
957 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
958 SSMR3PutU8(pSSM, pRegMmio->idSavedState);
959 rc = SSMR3PutU32(pSSM, iPage);
960 }
961 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
962 rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
963 if (RT_FAILURE(rc))
964 break;
965 iPageLast = iPage;
966 }
967 }
968 pgmUnlock(pVM);
969 }
970 /*
971 * Reduce the rate after a little while since the current MMIO2 approach is
972 * a bit expensive.
973 * We position it two passes after the scan pass to avoid saving busy pages.
974 */
975 else if ( uPass <= 10
976 || (uPass & 3) == 2)
977 {
978 pgmLock(pVM);
979 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
980 pRegMmio && RT_SUCCESS(rc);
981 pRegMmio = pRegMmio->pNextR3)
982 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
983 {
984 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
985 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
986 uint32_t cPages = pRegMmio->RamRange.cb >> PAGE_SHIFT;
987 uint32_t iPageLast = cPages;
988 pgmUnlock(pVM);
989
990 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
991 {
992 /* Skip clean pages and pages which haven't quiesced. */
993 if (!paLSPages[iPage].fDirty)
994 continue;
995 if (paLSPages[iPage].cUnchangedScans < 3)
996 continue;
997 if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
998 continue;
999
1000 /* Save it. */
1001 bool const fZero = paLSPages[iPage].fZero;
1002 uint8_t abPage[PAGE_SIZE];
1003 if (!fZero)
1004 {
1005 memcpy(abPage, pbPage, PAGE_SIZE);
1006 RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
1007 }
1008
1009 uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
1010 if (iPage != 0 && iPage == iPageLast + 1)
1011 rc = SSMR3PutU8(pSSM, u8Type);
1012 else
1013 {
1014 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
1015 SSMR3PutU8(pSSM, pRegMmio->idSavedState);
1016 rc = SSMR3PutU32(pSSM, iPage);
1017 }
1018 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
1019 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
1020 if (RT_FAILURE(rc))
1021 break;
1022
1023 /* Housekeeping. */
1024 paLSPages[iPage].fDirty = false;
1025 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
1026 pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
1027 if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
1028 pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
1029 pVM->pgm.s.LiveSave.cSavedPages++;
1030 iPageLast = iPage;
1031 }
1032
1033 pgmLock(pVM);
1034 }
1035 pgmUnlock(pVM);
1036 }
1037
1038 return rc;
1039}
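/*
 * A worked example of the pass interplay: pgmR3ScanMmio2Pages runs when
 * uPass <= 10 or (uPass & 3) == 0, while the rate-limited save branch above
 * runs when uPass <= 10 or (uPass & 3) == 2.  After the initial ten passes
 * that gives scans at passes 12, 16, 20, ... and saves at 14, 18, 22, ...,
 * each save trailing the matching scan by two passes so busy pages get a
 * chance to be re-dirtied (and thus skipped) before being written out.
 */
#if 0 /* example only */
for (uint32_t uPass = 11; uPass <= 22; uPass++)
{
    bool const fScanPass = (uPass & 3) == 0;    /* 12, 16, 20 */
    bool const fSavePass = (uPass & 3) == 2;    /* 14, 18, 22 */
    NOREF(fScanPass); NOREF(fSavePass);
}
#endif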
1040
1041
1042/**
1043 * Cleans up MMIO2 pages after a live save.
1044 *
1045 * @param pVM The cross context VM structure.
1046 */
1047static void pgmR3DoneMmio2Pages(PVM pVM)
1048{
1049 /*
1050 * Free the tracking structures for the MMIO2 pages.
1051 * We do the freeing outside the lock in case the VM is running.
1052 */
1053 pgmLock(pVM);
1054 for (PPGMREGMMIORANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
1055 if (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
1056 {
1057 void *pvMmio2ToFree = pRegMmio->paLSPages;
1058 if (pvMmio2ToFree)
1059 {
1060 pRegMmio->paLSPages = NULL;
1061 pgmUnlock(pVM);
1062 MMR3HeapFree(pvMmio2ToFree);
1063 pgmLock(pVM);
1064 }
1065 }
1066 pgmUnlock(pVM);
1067}
1068
1069
1070/**
1071 * Prepares the RAM pages for a live save.
1072 *
1073 * @returns VBox status code.
1074 * @param pVM The cross context VM structure.
1075 */
1076static int pgmR3PrepRamPages(PVM pVM)
1077{
1078
1079 /*
1080 * Try allocating tracking structures for the ram ranges.
1081 *
1082 * To avoid lock contention, we leave the lock every time we're allocating
1083 * a new array. This means we'll have to ditch the allocation and start
1084 * all over again if the RAM range list changes in-between.
1085 *
1086 * Note! pgmR3SaveDone will always be called and it is therefore responsible
1087 * for cleaning up.
1088 */
1089 PPGMRAMRANGE pCur;
1090 pgmLock(pVM);
1091 do
1092 {
1093 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1094 {
1095 if ( !pCur->paLSPages
1096 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1097 {
1098 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1099 uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
1100 pgmUnlock(pVM);
1101 PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
1102 if (!paLSPages)
1103 return VERR_NO_MEMORY;
1104 pgmLock(pVM);
1105 if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1106 {
1107 pgmUnlock(pVM);
1108 MMR3HeapFree(paLSPages);
1109 pgmLock(pVM);
1110 break; /* try again */
1111 }
1112 pCur->paLSPages = paLSPages;
1113
1114 /*
1115 * Initialize the array.
1116 */
1117 uint32_t iPage = cPages;
1118 while (iPage-- > 0)
1119 {
1120 /** @todo yield critsect! (after moving this away from EMT0) */
1121 PCPGMPAGE pPage = &pCur->aPages[iPage];
1122 paLSPages[iPage].cDirtied = 0;
1123 paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
1124 paLSPages[iPage].fWriteMonitored = 0;
1125 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1126 paLSPages[iPage].u2Reserved = 0;
1127 switch (PGM_PAGE_GET_TYPE(pPage))
1128 {
1129 case PGMPAGETYPE_RAM:
1130 if ( PGM_PAGE_IS_ZERO(pPage)
1131 || PGM_PAGE_IS_BALLOONED(pPage))
1132 {
1133 paLSPages[iPage].fZero = 1;
1134 paLSPages[iPage].fShared = 0;
1135#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1136 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1137#endif
1138 }
1139 else if (PGM_PAGE_IS_SHARED(pPage))
1140 {
1141 paLSPages[iPage].fZero = 0;
1142 paLSPages[iPage].fShared = 1;
1143#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1144 paLSPages[iPage].u32Crc = UINT32_MAX;
1145#endif
1146 }
1147 else
1148 {
1149 paLSPages[iPage].fZero = 0;
1150 paLSPages[iPage].fShared = 0;
1151#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1152 paLSPages[iPage].u32Crc = UINT32_MAX;
1153#endif
1154 }
1155 paLSPages[iPage].fIgnore = 0;
1156 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1157 break;
1158
1159 case PGMPAGETYPE_ROM_SHADOW:
1160 case PGMPAGETYPE_ROM:
1161 {
1162 paLSPages[iPage].fZero = 0;
1163 paLSPages[iPage].fShared = 0;
1164 paLSPages[iPage].fDirty = 0;
1165 paLSPages[iPage].fIgnore = 1;
1166#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1167 paLSPages[iPage].u32Crc = UINT32_MAX;
1168#endif
1169 pVM->pgm.s.LiveSave.cIgnoredPages++;
1170 break;
1171 }
1172
1173 default:
1174 AssertMsgFailed(("%R[pgmpage]", pPage));
1175 RT_FALL_THRU();
1176 case PGMPAGETYPE_MMIO2:
1177 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1178 paLSPages[iPage].fZero = 0;
1179 paLSPages[iPage].fShared = 0;
1180 paLSPages[iPage].fDirty = 0;
1181 paLSPages[iPage].fIgnore = 1;
1182#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1183 paLSPages[iPage].u32Crc = UINT32_MAX;
1184#endif
1185 pVM->pgm.s.LiveSave.cIgnoredPages++;
1186 break;
1187
1188 case PGMPAGETYPE_MMIO:
1189 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
1190 paLSPages[iPage].fZero = 0;
1191 paLSPages[iPage].fShared = 0;
1192 paLSPages[iPage].fDirty = 0;
1193 paLSPages[iPage].fIgnore = 1;
1194#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1195 paLSPages[iPage].u32Crc = UINT32_MAX;
1196#endif
1197 pVM->pgm.s.LiveSave.cIgnoredPages++;
1198 break;
1199 }
1200 }
1201 }
1202 }
1203 } while (pCur);
1204 pgmUnlock(pVM);
1205
1206 return VINF_SUCCESS;
1207}
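/*
 * The generation counter dance above is worth spelling out: the PGM lock is
 * dropped around the potentially blocking heap allocation, and idRamRangesGen
 * is re-checked after relocking to detect a concurrent change of the RAM
 * range list.  A generic sketch of the idiom (hypothetical helper, not part
 * of the original file):
 */
#if 0 /* example only */
static void *pgmR3ExampleAllocStable(PVM pVM, size_t cb)
{
    for (;;)
    {
        pgmLock(pVM);
        uint32_t const idGen = pVM->pgm.s.idRamRangesGen;
        pgmUnlock(pVM);

        void *pv = MMR3HeapAllocZ(pVM, MM_TAG_PGM, cb); /* may block */
        if (!pv)
            return NULL;

        pgmLock(pVM);
        bool const fStable = pVM->pgm.s.idRamRangesGen == idGen;
        pgmUnlock(pVM);
        if (fStable)
            return pv;          /* the list didn't change while unlocked */
        MMR3HeapFree(pv);       /* it did: ditch the allocation and retry */
    }
}
#endif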
1208
1209
1210/**
1211 * Saves the RAM configuration.
1212 *
1213 * @returns VBox status code.
1214 * @param pVM The cross context VM structure.
1215 * @param pSSM The saved state handle.
1216 */
1217static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
1218{
1219 uint32_t cbRamHole = 0;
1220 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
1221 AssertRCReturn(rc, rc);
1222
1223 uint64_t cbRam = 0;
1224 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
1225 AssertRCReturn(rc, rc);
1226
1227 SSMR3PutU32(pSSM, cbRamHole);
1228 return SSMR3PutU64(pSSM, cbRam);
1229}
1230
1231
1232/**
1233 * Loads and verifies the RAM configuration.
1234 *
1235 * @returns VBox status code.
1236 * @param pVM The cross context VM structure.
1237 * @param pSSM The saved state handle.
1238 */
1239static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
1240{
1241 uint32_t cbRamHoleCfg = 0;
1242 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
1243 AssertRCReturn(rc, rc);
1244
1245 uint64_t cbRamCfg = 0;
1246 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
1247 AssertRCReturn(rc, rc);
1248
1249 uint32_t cbRamHoleSaved;
1250 SSMR3GetU32(pSSM, &cbRamHoleSaved);
1251
1252 uint64_t cbRamSaved;
1253 rc = SSMR3GetU64(pSSM, &cbRamSaved);
1254 AssertRCReturn(rc, rc);
1255
1256 if ( cbRamHoleCfg != cbRamHoleSaved
1257 || cbRamCfg != cbRamSaved)
1258 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
1259 cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
1260 return VINF_SUCCESS;
1261}
1262
1263#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1264
1265/**
1266 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1267 * info with it.
1268 *
1269 * @param pVM The cross context VM structure.
1270 * @param pCur The current RAM range.
1271 * @param paLSPages The current array of live save page tracking
1272 * structures.
1273 * @param iPage The page index.
1274 */
1275static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1276{
1277 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1278 PGMPAGEMAPLOCK PgMpLck;
1279 void const *pvPage;
1280 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1281 if (RT_SUCCESS(rc))
1282 {
1283 paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1284 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1285 }
1286 else
1287 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1288}
1289
1290
1291/**
1292 * Verifies the CRC-32 for a page given its raw bits.
1293 *
1294 * @param pvPage The page bits.
1295 * @param pCur The current RAM range.
1296 * @param paLSPages The current array of live save page tracking
1297 * structures.
1298 * @param iPage The page index.
1299 * @param pszWhere Calling context (for the assertion message). */
1300static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1301{
1302 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1303 {
1304 uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
1305 Assert( ( !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
1306 && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
1307 || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
1308 AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
1309 ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
1310 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
1311 }
1312}
1313
1314
1315/**
1316 * Verifies the CRC-32 for a RAM page.
1317 *
1318 * @param pVM The cross context VM structure.
1319 * @param pCur The current RAM range.
1320 * @param paLSPages The current array of live save page tracking
1321 * structures.
1322 * @param iPage The page index.
1323 * @param pszWhere Calling context (for the assertion message). */
1324static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1325{
1326 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1327 {
1328 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1329 PGMPAGEMAPLOCK PgMpLck;
1330 void const *pvPage;
1331 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1332 if (RT_SUCCESS(rc))
1333 {
1334 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
1335 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1336 }
1337 }
1338}
1339
1340#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1341
1342/**
1343 * Scan for RAM page modifications and reprotect them.
1344 *
1345 * @param pVM The cross context VM structure.
1346 * @param fFinalPass Whether this is the final pass or not.
1347 */
1348static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
1349{
1350 /*
1351 * The RAM.
1352 */
1353 RTGCPHYS GCPhysCur = 0;
1354 PPGMRAMRANGE pCur;
1355 pgmLock(pVM);
1356 do
1357 {
1358 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1359 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1360 {
1361 if ( pCur->GCPhysLast > GCPhysCur
1362 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1363 {
1364 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1365 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1366 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1367 GCPhysCur = 0;
1368 for (; iPage < cPages; iPage++)
1369 {
1370 /* Do yield first. */
1371 if ( !fFinalPass
1372#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
1373 && (iPage & 0x7ff) == 0x100
1374#endif
1375 && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
1376 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1377 {
1378 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1379 break; /* restart */
1380 }
1381
1382 /* Skip already ignored pages. */
1383 if (paLSPages[iPage].fIgnore)
1384 continue;
1385
1386 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1387 {
1388 /*
1389 * A RAM page.
1390 */
1391 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1392 {
1393 case PGM_PAGE_STATE_ALLOCATED:
1394 /** @todo Optimize this: Don't always re-enable write
1395 * monitoring if the page is known to be very busy. */
1396 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1397 {
1398 AssertMsg(paLSPages[iPage].fWriteMonitored,
1399 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
1400 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1401 Assert(pVM->pgm.s.cWrittenToPages > 0);
1402 pVM->pgm.s.cWrittenToPages--;
1403 }
1404 else
1405 {
1406 AssertMsg(!paLSPages[iPage].fWriteMonitored,
1407 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
1408 pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
1409 }
1410
1411 if (!paLSPages[iPage].fDirty)
1412 {
1413 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1414 if (paLSPages[iPage].fZero)
1415 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1416 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1417 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1418 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1419 }
1420
1421 pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
1422 pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1423 paLSPages[iPage].fWriteMonitored = 1;
1424 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1425 paLSPages[iPage].fDirty = 1;
1426 paLSPages[iPage].fZero = 0;
1427 paLSPages[iPage].fShared = 0;
1428#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1429 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1430#endif
1431 break;
1432
1433 case PGM_PAGE_STATE_WRITE_MONITORED:
1434 Assert(paLSPages[iPage].fWriteMonitored);
1435 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1436 {
1437#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1438 if (paLSPages[iPage].fWriteMonitoredJustNow)
1439 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1440 else
1441 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
1442#endif
1443 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1444 }
1445 else
1446 {
1447 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1448#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1449 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1450#endif
1451 if (!paLSPages[iPage].fDirty)
1452 {
1453 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1454 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1455 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1456 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1457 }
1458 }
1459 break;
1460
1461 case PGM_PAGE_STATE_ZERO:
1462 case PGM_PAGE_STATE_BALLOONED:
1463 if (!paLSPages[iPage].fZero)
1464 {
1465 if (!paLSPages[iPage].fDirty)
1466 {
1467 paLSPages[iPage].fDirty = 1;
1468 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1469 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1470 }
1471 paLSPages[iPage].fZero = 1;
1472 paLSPages[iPage].fShared = 0;
1473#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1474 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1475#endif
1476 }
1477 break;
1478
1479 case PGM_PAGE_STATE_SHARED:
1480 if (!paLSPages[iPage].fShared)
1481 {
1482 if (!paLSPages[iPage].fDirty)
1483 {
1484 paLSPages[iPage].fDirty = 1;
1485 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1486 if (paLSPages[iPage].fZero)
1487 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1488 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1489 }
1490 paLSPages[iPage].fZero = 0;
1491 paLSPages[iPage].fShared = 1;
1492#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1493 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1494#endif
1495 }
1496 break;
1497 }
1498 }
1499 else
1500 {
1501 /*
1502 * All other types => Ignore the page.
1503 */
1504 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1505 paLSPages[iPage].fIgnore = 1;
1506 if (paLSPages[iPage].fWriteMonitored)
1507 {
1508 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1509 * pages! */
1510 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1511 {
1512 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1513 PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1514 Assert(pVM->pgm.s.cMonitoredPages > 0);
1515 pVM->pgm.s.cMonitoredPages--;
1516 }
1517 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1518 {
1519 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1520 Assert(pVM->pgm.s.cWrittenToPages > 0);
1521 pVM->pgm.s.cWrittenToPages--;
1522 }
1523 pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
1524 }
1525
1526 /** @todo the counting doesn't quite work out here. fix later? */
1527 if (paLSPages[iPage].fDirty)
1528 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1529 else
1530 {
1531 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1532 if (paLSPages[iPage].fZero)
1533 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1534 }
1535 pVM->pgm.s.LiveSave.cIgnoredPages++;
1536 }
1537 } /* for each page in range */
1538
1539 if (GCPhysCur != 0)
1540 break; /* Yield + ramrange change */
1541 GCPhysCur = pCur->GCPhysLast;
1542 }
1543 } /* for each range */
1544 } while (pCur);
1545 pgmUnlock(pVM);
1546}
1547
1548
1549/**
1550 * Save quiescent RAM pages.
1551 *
1552 * @returns VBox status code.
1553 * @param pVM The cross context VM structure.
1554 * @param pSSM The SSM handle.
1555 * @param fLiveSave Whether it's a live save or not.
1556 * @param uPass The pass number.
1557 */
1558static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1559{
1560 NOREF(fLiveSave);
1561
1562 /*
1563 * The RAM.
1564 */
1565 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1566 RTGCPHYS GCPhysCur = 0;
1567 PPGMRAMRANGE pCur;
1568
1569 pgmLock(pVM);
1570 do
1571 {
1572 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1573 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1574 {
1575 if ( pCur->GCPhysLast > GCPhysCur
1576 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1577 {
1578 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1579 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1580 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1581 GCPhysCur = 0;
1582 for (; iPage < cPages; iPage++)
1583 {
1584 /* Do yield first. */
1585 if ( uPass != SSM_PASS_FINAL
1586 && (iPage & 0x7ff) == 0x100
1587 && PDMR3CritSectYield(&pVM->pgm.s.CritSectX)
1588 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1589 {
1590 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1591 break; /* restart */
1592 }
1593
1594 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1595
1596 /*
1597 * Only save pages that haven't changed since last scan and are dirty.
1598 */
1599 if ( uPass != SSM_PASS_FINAL
1600 && paLSPages)
1601 {
1602 if (!paLSPages[iPage].fDirty)
1603 continue;
1604 if (paLSPages[iPage].fWriteMonitoredJustNow)
1605 continue;
1606 if (paLSPages[iPage].fIgnore)
1607 continue;
1608 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1609 continue;
1610 if ( PGM_PAGE_GET_STATE(pCurPage)
1611 != ( paLSPages[iPage].fZero
1612 ? PGM_PAGE_STATE_ZERO
1613 : paLSPages[iPage].fShared
1614 ? PGM_PAGE_STATE_SHARED
1615 : PGM_PAGE_STATE_WRITE_MONITORED))
1616 continue;
1617 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1618 continue;
1619 }
1620 else
1621 {
1622 if ( paLSPages
1623 && !paLSPages[iPage].fDirty
1624 && !paLSPages[iPage].fIgnore)
1625 {
1626#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1627 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1628 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1629#endif
1630 continue;
1631 }
1632 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1633 continue;
1634 }
1635
1636 /*
1637 * Do the saving outside the PGM critsect since SSM may block on I/O.
1638 */
1639 int rc;
1640 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1641 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1642 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1643 bool fSkipped = false;
1644
1645 if (!fZero && !fBallooned)
1646 {
1647 /*
1648 * Copy the page and then save it outside the lock (since any
1649 * SSM call may block).
1650 */
1651 uint8_t abPage[PAGE_SIZE];
1652 PGMPAGEMAPLOCK PgMpLck;
1653 void const *pvPage;
1654 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1655 if (RT_SUCCESS(rc))
1656 {
1657 memcpy(abPage, pvPage, PAGE_SIZE);
1658#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1659 if (paLSPages)
1660 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1661#endif
1662 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1663 }
1664 pgmUnlock(pVM);
1665 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1666
1667 /* Try to save some memory when restoring. */
1668 if (!ASMMemIsZeroPage(abPage)) /* use the copy; the mapping was released above */
1669 {
1670 if (GCPhys == GCPhysLast + PAGE_SIZE)
1671 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1672 else
1673 {
1674 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1675 SSMR3PutGCPhys(pSSM, GCPhys);
1676 }
1677 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
1678 }
1679 else
1680 {
1681 if (GCPhys == GCPhysLast + PAGE_SIZE)
1682 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1683 else
1684 {
1685 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1686 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1687 }
1688 }
1689 }
1690 else
1691 {
1692 /*
1693 * Dirty zero or ballooned page.
1694 */
1695#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1696 if (paLSPages)
1697 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1698#endif
1699 pgmUnlock(pVM);
1700
1701 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1702 if (GCPhys == GCPhysLast + PAGE_SIZE)
1703 rc = SSMR3PutU8(pSSM, u8RecType);
1704 else
1705 {
1706 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1707 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1708 }
1709 }
1710 if (RT_FAILURE(rc))
1711 return rc;
1712
1713 pgmLock(pVM);
1714 if (!fSkipped)
1715 GCPhysLast = GCPhys;
1716 if (paLSPages)
1717 {
1718 paLSPages[iPage].fDirty = 0;
1719 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1720 if (fZero)
1721 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1722 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1723 pVM->pgm.s.LiveSave.cSavedPages++;
1724 }
1725 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1726 {
1727 GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
1728 break; /* restart */
1729 }
1730
1731 } /* for each page in range */
1732
1733 if (GCPhysCur != 0)
1734 break; /* Yield + ramrange change */
1735 GCPhysCur = pCur->GCPhysLast;
1736 }
1737 } /* for each range */
1738 } while (pCur);
1739
1740 pgmUnlock(pVM);
1741
1742 return VINF_SUCCESS;
1743}
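/*
 * A worked example of the sparse encoding used above: saving three
 * consecutive RAM pages at 0x1000, 0x2000 and 0x3000 emits
 *
 *   [RAM_RAW | ADDR][GCPhys 0x1000][4 KiB data]
 *   [RAM_RAW]                      [4 KiB data]  <- GCPhys == GCPhysLast + PAGE_SIZE
 *   [RAM_RAW]                      [4 KiB data]
 *
 * i.e. the address is stored only when a page does not directly follow the
 * previously saved one, keeping the common sequential case compact.
 */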
1744
1745
1746/**
1747 * Cleans up RAM pages after a live save.
1748 *
1749 * @param pVM The cross context VM structure.
1750 */
1751static void pgmR3DoneRamPages(PVM pVM)
1752{
1753 /*
1754 * Free the tracking arrays and disable write monitoring.
1755 *
1756 * Play nice with the PGM lock in case we're called while the VM is still
1757 * running. This means we have to delay the freeing since we wish to use
1758 * paLSPages as an indicator of which RAM ranges which we need to scan for
1759 * write monitored pages.
1760 */
1761 void *pvToFree = NULL;
1762 PPGMRAMRANGE pCur;
1763 uint32_t cMonitoredPages = 0;
1764 pgmLock(pVM);
1765 do
1766 {
1767 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1768 {
1769 if (pCur->paLSPages)
1770 {
1771 if (pvToFree)
1772 {
1773 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1774 pgmUnlock(pVM);
1775 MMR3HeapFree(pvToFree);
1776 pvToFree = NULL;
1777 pgmLock(pVM);
1778 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1779 break; /* start over again. */
1780 }
1781
1782 pvToFree = pCur->paLSPages;
1783 pCur->paLSPages = NULL;
1784
1785 uint32_t iPage = pCur->cb >> PAGE_SHIFT;
1786 while (iPage--)
1787 {
1788 PPGMPAGE pPage = &pCur->aPages[iPage];
1789 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1790 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1791 {
1792 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1793 cMonitoredPages++;
1794 }
1795 }
1796 }
1797 }
1798 } while (pCur);
1799
1800 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1801 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1802 pVM->pgm.s.cMonitoredPages = 0;
1803 else
1804 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1805
1806 pgmUnlock(pVM);
1807
1808 MMR3HeapFree(pvToFree);
1809 pvToFree = NULL;
1810}
1811
1812
1813/**
1814 * @callback_method_impl{FNSSMINTLIVEEXEC}
1815 */
1816static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1817{
1818 int rc;
1819
1820 /*
1821 * Save the MMIO2 and ROM range IDs in pass 0.
1822 */
1823 if (uPass == 0)
1824 {
1825 rc = pgmR3SaveRamConfig(pVM, pSSM);
1826 if (RT_FAILURE(rc))
1827 return rc;
1828 rc = pgmR3SaveRomRanges(pVM, pSSM);
1829 if (RT_FAILURE(rc))
1830 return rc;
1831 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1832 if (RT_FAILURE(rc))
1833 return rc;
1834 }
1835 /*
1836 * Reset the page-per-second estimate to avoid inflation by the initial
1837 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1838 */
1839 else if (uPass == 7)
1840 {
1841 pVM->pgm.s.LiveSave.cSavedPages = 0;
1842 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1843 }
1844
1845 /*
1846 * Do the scanning.
1847 */
1848 pgmR3ScanRomPages(pVM);
1849 pgmR3ScanMmio2Pages(pVM, uPass);
1850 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1851 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1852
1853 /*
1854 * Save the pages.
1855 */
1856 if (uPass == 0)
1857 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1858 else
1859 rc = VINF_SUCCESS;
1860 if (RT_SUCCESS(rc))
1861 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1862 if (RT_SUCCESS(rc))
1863 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1864 if (RT_SUCCESS(rc))
1865 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1866 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1867
1868 return rc;
1869}
1870
1871
1872/**
1873 * @callback_method_impl{FNSSMINTLIVEVOTE}
1874 */
1875static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1876{
1877 /*
1878 * Update and calculate parameters used in the decision making.
1879 */
1880 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1881
1882 /* update history. */
1883 pgmLock(pVM);
1884 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1885 pgmUnlock(pVM);
1886 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1887 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1888 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1889 + cWrittenToPages;
1890 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1891 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1892 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1893
1894 /* calc short-term average (4 passes). */
1895 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1896 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1897 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1898 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1899 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1900 uint32_t const cDirtyPagesShort = cTotal / 4;
1901 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1902
1903 /* calc long-term average. */
1904 cTotal = 0;
1905 if (uPass < cHistoryEntries)
1906 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1907 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1908 else
1909 for (i = 0; i < cHistoryEntries; i++)
1910 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1911 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
1912 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
1913
1914 /* estimate the speed */
1915 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
1916 uint32_t cPagesPerSecond = (uint32_t)( pVM->pgm.s.LiveSave.cSavedPages
1917 / ((long double)cNsElapsed / 1000000000.0) );
1918 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
1919
1920 /*
1921 * Try to make a decision.
1922 */
1923 if ( cDirtyPagesShort <= cDirtyPagesLong
1924 && ( cDirtyNow <= cDirtyPagesShort
1925 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
1926 )
1927 )
1928 {
1929 if (uPass > 10)
1930 {
1931 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
1932 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
1933 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
1934 if (cMsMaxDowntime < 32)
1935 cMsMaxDowntime = 32;
1936 if ( ( cMsLeftLong <= cMsMaxDowntime
1937 && cMsLeftShort < cMsMaxDowntime)
1938 || cMsLeftShort < cMsMaxDowntime / 2
1939 )
1940 {
1941 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
1942 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
1943 return VINF_SUCCESS;
1944 }
1945 }
1946 else
1947 {
1948 if ( ( cDirtyPagesShort <= 128
1949 && cDirtyPagesLong <= 1024)
1950 || cDirtyPagesLong <= 256
1951 )
1952 {
1953 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
1954 return VINF_SUCCESS;
1955 }
1956 }
1957 }
1958
1959 /*
1960 * Come up with a completion percentage. Currently this is a simple
1961 * dirty page (long term) vs. total pages ratio + some pass trickery.
1962 */
1963 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong * 100.0
1964 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
1965 if (uPctDirty <= 100)
1966 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
1967 else
1968 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
1969 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
1970
1971 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1972}
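
/*
 * Annotation: a worked example of the downtime estimate above, as a
 * stand-alone program.  The 8192 pages/sec figure matches the initial
 * cPagesPerSecond seed set in pgmR3LivePrep below; everything else
 * (the demo* names) is invented for the sketch.
 */
#if 0 /* illustrative sketch, not built */
# include <stdio.h>
# include <stdint.h>

static uint32_t demoMsLeft(uint32_t cDirtyPages, uint32_t cPagesPerSecond)
{
    /* Same formula as pgmR3LiveVote: pages left / throughput, in ms. */
    return (uint32_t)(cDirtyPages / (long double)cPagesPerSecond * 1000.0);
}

int main(void)
{
    /* 256 dirty pages at 8192 pages/sec is ~31 ms, i.e. just inside the
       32 ms minimum downtime floor enforced above. */
    printf("256 dirty  -> %u ms\n", demoMsLeft(256, 8192));    /* 31 */
    printf("4096 dirty -> %u ms\n", demoMsLeft(4096, 8192));   /* 500 */
    return 0;
}
#endif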
1973
1974
1975/**
1976 * @callback_method_impl{FNSSMINTLIVEPREP}
1977 *
1978 * This will attempt to allocate and initialize the tracking structures. It
1979 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1980 * pgmR3SaveDone will do the cleanups.
1981 */
1982static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
1983{
1984 /*
1985 * Indicate that we will be using the write monitoring.
1986 */
1987 pgmLock(pVM);
1988 /** @todo find a way of mediating this when more users are added. */
1989 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
1990 {
1991 pgmUnlock(pVM);
1992 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
1993 }
1994 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
1995 pgmUnlock(pVM);
1996
1997 /*
1998 * Initialize the statistics.
1999 */
2000 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2001 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2002 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2003 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2004 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2005 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2006 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2007 pVM->pgm.s.LiveSave.fActive = true;
2008 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2009 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2010 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2011 pVM->pgm.s.LiveSave.cSavedPages = 0;
2012 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2013 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2014
2015 /*
2016 * Per page type.
2017 */
2018 int rc = pgmR3PrepRomPages(pVM);
2019 if (RT_SUCCESS(rc))
2020 rc = pgmR3PrepMmio2Pages(pVM);
2021 if (RT_SUCCESS(rc))
2022 rc = pgmR3PrepRamPages(pVM);
2023
2024 NOREF(pSSM);
2025 return rc;
2026}
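
/*
 * Annotation: seeding the dirty-page history with UINT32_MAX / 2 above keeps
 * the early long-term average huge, so the cDirtyPagesShort <= cDirtyPagesLong
 * test in pgmR3LiveVote cannot produce a premature "converged" vote during
 * the first few passes (an inference from the two functions in this excerpt,
 * not from separate documentation).
 */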
2027
2028
2029/**
2030 * @callback_method_impl{FNSSMINTSAVEEXEC}
2031 */
2032static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2033{
2034 int rc = VINF_SUCCESS;
2035 PPGM pPGM = &pVM->pgm.s;
2036
2037 /*
2038 * Lock PGM and set the no-more-writes indicator.
2039 */
2040 pgmLock(pVM);
2041 pVM->pgm.s.fNoMorePhysWrites = true;
2042
2043 /*
2044 * Save basic data (required / unaffected by relocation).
2045 */
2046 bool const fMappingsFixed = pVM->pgm.s.fMappingsFixed;
2047 pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;
2048 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
2049 pVM->pgm.s.fMappingsFixed = fMappingsFixed;
2050
2051 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2052 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2053
2054 /*
2055 * Save the (remainder of the) memory.
2056 */
2057 if (RT_SUCCESS(rc))
2058 {
2059 if (pVM->pgm.s.LiveSave.fActive)
2060 {
2061 pgmR3ScanRomPages(pVM);
2062 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2063 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2064
2065 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2066 if (RT_SUCCESS(rc))
2067 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2068 if (RT_SUCCESS(rc))
2069 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2070 }
2071 else
2072 {
2073 rc = pgmR3SaveRamConfig(pVM, pSSM);
2074 if (RT_SUCCESS(rc))
2075 rc = pgmR3SaveRomRanges(pVM, pSSM);
2076 if (RT_SUCCESS(rc))
2077 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2078 if (RT_SUCCESS(rc))
2079 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2080 if (RT_SUCCESS(rc))
2081 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2082 if (RT_SUCCESS(rc))
2083 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2084 if (RT_SUCCESS(rc))
2085 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2086 }
2087 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
2088 }
2089
2090 pgmUnlock(pVM);
2091 return rc;
2092}
2093
2094
2095/**
2096 * @callback_method_impl{FNSSMINTSAVEDONE}
2097 */
2098static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2099{
2100 /*
2101 * Do per page type cleanups first.
2102 */
2103 if (pVM->pgm.s.LiveSave.fActive)
2104 {
2105 pgmR3DoneRomPages(pVM);
2106 pgmR3DoneMmio2Pages(pVM);
2107 pgmR3DoneRamPages(pVM);
2108 }
2109
2110 /*
2111 * Clear the live save indicator and disengage write monitoring.
2112 */
2113 pgmLock(pVM);
2114 pVM->pgm.s.LiveSave.fActive = false;
2115 /** @todo this is blindly assuming that we're the only user of write
2116 * monitoring. Fix this when more users are added. */
2117 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2118 pgmUnlock(pVM);
2119
2120 NOREF(pSSM);
2121 return VINF_SUCCESS;
2122}
2123
2124
2125/**
2126 * @callback_method_impl{FNSSMINTLOADPREP}
2127 */
2128static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2129{
2130 /*
2131 * Call the reset function to make sure all the memory is cleared.
2132 */
2133 PGMR3Reset(pVM);
2134 pVM->pgm.s.LiveSave.fActive = false;
2135 NOREF(pSSM);
2136 return VINF_SUCCESS;
2137}
2138
2139
2140/**
2141 * Load an ignored page.
2142 *
2143 * @returns VBox status code.
2144 * @param pSSM The saved state handle.
2145 */
2146static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2147{
2148 uint8_t abPage[PAGE_SIZE];
2149 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2150}
2151
2152
2153/**
2154 * Compares a page with an old save type value.
2155 *
2156 * @returns true if equal, false if not.
2157 * @param pPage The page to compare.
2158 * @param uOldType The old type value from the saved state.
2159 */
2160DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2161{
2162 uint8_t uOldPageType;
2163 switch (PGM_PAGE_GET_TYPE(pPage))
2164 {
2165 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2166 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2167 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2168 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2169 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2170 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2171 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2172 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2173 default:
2174 AssertFailed();
2175 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2176 break;
2177 }
2178 return uOldPageType == uOldType;
2179}
2180
2181
2182/**
2183 * Loads a page without any bits in the saved state, i.e. making sure it's
2184 * really zero.
2185 *
2186 * @returns VBox status code.
2187 * @param pVM The cross context VM structure.
2188 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2189 * state).
2190 * @param pPage The guest page tracking structure.
2191 * @param GCPhys The page address.
2192 * @param pRam The ram range (logging).
2193 */
2194static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2195{
2196 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2197 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2198 return VERR_SSM_UNEXPECTED_DATA;
2199
2200 /* I think this should be sufficient. */
2201 if ( !PGM_PAGE_IS_ZERO(pPage)
2202 && !PGM_PAGE_IS_BALLOONED(pPage))
2203 return VERR_SSM_UNEXPECTED_DATA;
2204
2205 NOREF(pVM);
2206 NOREF(GCPhys);
2207 NOREF(pRam);
2208 return VINF_SUCCESS;
2209}
2210
2211
2212/**
2213 * Loads a page from the saved state.
2214 *
2215 * @returns VBox status code.
2216 * @param pVM The cross context VM structure.
2217 * @param pSSM The SSM handle.
2218 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2219 * state).
2220 * @param pPage The guest page tracking structure.
2221 * @param GCPhys The page address.
2222 * @param pRam The ram range (logging).
2223 */
2224static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2225{
2226 /*
2227 * Match up the type, dealing with MMIO2 aliases (dropped).
2228 */
2229 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2230 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2231 /* kludge for the expanded PXE bios (r67885) - @bugref{5687}: */
2232 || ( uOldType == PGMPAGETYPE_OLD_RAM
2233 && GCPhys >= 0xed000
2234 && GCPhys <= 0xeffff
2235 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2236 ,
2237 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2238 VERR_SSM_UNEXPECTED_DATA);
2239
2240 /*
2241 * Load the page.
2242 */
2243 PGMPAGEMAPLOCK PgMpLck;
2244 void *pvPage;
2245 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2246 if (RT_SUCCESS(rc))
2247 {
2248 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
2249 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2250 }
2251
2252 return rc;
2253}
2254
2255
2256/**
2257 * Loads a page (counterpart to pgmR3SavePage).
2258 *
2259 * @returns VBox status code, fully bitched errors.
2260 * @param pVM The cross context VM structure.
2261 * @param pSSM The SSM handle.
2262 * @param uOldType The page type.
2263 * @param pPage The page.
2264 * @param GCPhys The page address.
2265 * @param pRam The RAM range (for error messages).
2266 */
2267static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2268{
2269 uint8_t uState;
2270 int rc = SSMR3GetU8(pSSM, &uState);
2271 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2272 if (uState == 0 /* zero */)
2273 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2274 else if (uState == 1)
2275 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2276 else
2277 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2278 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2279 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2280 rc);
2281 return VINF_SUCCESS;
2282}
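
/*
 * Annotation: the save side (pgmR3SavePage) is not part of this excerpt, but
 * the loader above implies the old per-page encoding: one state byte, where
 * 0 = zero page with no payload and 1 = a full page of raw bytes following.
 * A hedged sketch of a writer producing that shape; FILE* stands in for the
 * real SSM handle and demoSavePageOld is an invented name.
 */
#if 0 /* illustrative sketch, not built */
# include <stdio.h>
# include <stdint.h>

static int demoSavePageOld(FILE *pStream, const uint8_t *pbPage, size_t cbPage, int fIsZero)
{
    uint8_t uState = fIsZero ? 0 : 1;               /* matches uState above */
    if (fwrite(&uState, 1, 1, pStream) != 1)
        return -1;
    if (!fIsZero && fwrite(pbPage, 1, cbPage, pStream) != cbPage)
        return -1;                                  /* the raw page bits */
    return 0;
}
#endif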
2283
2284
2285/**
2286 * Loads a shadowed ROM page.
2287 *
2288 * @returns VBox status code, errors are fully bitched.
2289 * @param pVM The cross context VM structure.
2290 * @param pSSM The saved state handle.
2291 * @param pPage The page.
2292 * @param GCPhys The page address.
2293 * @param pRam The RAM range (for error messages).
2294 */
2295static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2296{
2297 /*
2298 * Load and set the protection first, then load the two pages, the first
2299 * one being the active page and the other the passive one.
2300 */
2301 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2302 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2303
2304 uint8_t uProt;
2305 int rc = SSMR3GetU8(pSSM, &uProt);
2306 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2307 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2308 AssertLogRelMsgReturn( enmProt > PGMROMPROT_INVALID
2309 && enmProt < PGMROMPROT_END,
2310 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2311 VERR_SSM_UNEXPECTED_DATA);
2312
2313 if (pRomPage->enmProt != enmProt)
2314 {
2315 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2316 AssertLogRelRCReturn(rc, rc);
2317 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2318 }
2319
2320 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2321 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2322 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2323 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2324
2325 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2326 * used down the line (the 2nd page will be written to the first
2327 * one because of a false TLB hit since the TLB is using GCPhys and
2328 * doesn't check the HCPhys of the desired page). */
2329 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2330 if (RT_SUCCESS(rc))
2331 {
2332 *pPageActive = *pPage;
2333 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2334 }
2335 return rc;
2336}
2337
2338/**
2339 * Loads the RAM range flags and bits for older versions of the saved state.
2340 *
2341 * @returns VBox status code.
2342 *
2343 * @param pVM The cross context VM structure.
2344 * @param pSSM The SSM handle.
2345 * @param uVersion The saved state version.
2346 */
2347static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2348{
2349 PPGM pPGM = &pVM->pgm.s;
2350
2351 /*
2352 * Ram range flags and bits.
2353 */
2354 uint32_t i = 0;
2355 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
2356 {
2357 /* Check the sequence number / separator. */
2358 uint32_t u32Sep;
2359 int rc = SSMR3GetU32(pSSM, &u32Sep);
2360 if (RT_FAILURE(rc))
2361 return rc;
2362 if (u32Sep == ~0U)
2363 break;
2364 if (u32Sep != i)
2365 {
2366 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2367 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2368 }
2369 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2370
2371 /* Get the range details. */
2372 RTGCPHYS GCPhys;
2373 SSMR3GetGCPhys(pSSM, &GCPhys);
2374 RTGCPHYS GCPhysLast;
2375 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2376 RTGCPHYS cb;
2377 SSMR3GetGCPhys(pSSM, &cb);
2378 uint8_t fHaveBits;
2379 rc = SSMR3GetU8(pSSM, &fHaveBits);
2380 if (RT_FAILURE(rc))
2381 return rc;
2382 if (fHaveBits & ~1)
2383 {
2384 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2385 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2386 }
2387 size_t cchDesc = 0;
2388 char szDesc[256];
2389 szDesc[0] = '\0';
2390 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2391 {
2392 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2393 if (RT_FAILURE(rc))
2394 return rc;
2395 /* Since we've modified the description strings in r45878, only compare
2396 them if the saved state is more recent. */
2397 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2398 cchDesc = strlen(szDesc);
2399 }
2400
2401 /*
2402 * Match it up with the current range.
2403 *
2404 * Note there is a hack for dealing with the high BIOS mapping
2405 * in the old saved state format; this means we might not have
2406 * a 1:1 match on success.
2407 */
2408 if ( ( GCPhys != pRam->GCPhys
2409 || GCPhysLast != pRam->GCPhysLast
2410 || cb != pRam->cb
2411 || ( cchDesc
2412 && strcmp(szDesc, pRam->pszDesc)) )
2413 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2414 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2415 || GCPhys != UINT32_C(0xfff80000)
2416 || GCPhysLast != UINT32_C(0xffffffff)
2417 || pRam->GCPhysLast != GCPhysLast
2418 || pRam->GCPhys < GCPhys
2419 || !fHaveBits)
2420 )
2421 {
2422 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2423 "State : %RGp-%RGp %RGp bytes %s %s\n",
2424 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2425 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2426 /*
2427 * If we're loading a state for debugging purposes, don't make a fuss if
2428 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2429 */
2430 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2431 || GCPhys < 8 * _1M)
2432 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2433 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2434 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2435 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
2436
2437 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2438 continue;
2439 }
2440
2441 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
2442 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2443 {
2444 /*
2445 * Load the pages one by one.
2446 */
2447 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2448 {
2449 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2450 PPGMPAGE pPage = &pRam->aPages[iPage];
2451 uint8_t uOldType;
2452 rc = SSMR3GetU8(pSSM, &uOldType);
2453 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2454 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2455 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2456 else
2457 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2458 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2459 }
2460 }
2461 else
2462 {
2463 /*
2464 * Old format.
2465 */
2466
2467 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2468 The rest is generally irrelevant and wrong since the stuff has to match the registrations. */
2469 uint32_t fFlags = 0;
2470 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2471 {
2472 uint16_t u16Flags;
2473 rc = SSMR3GetU16(pSSM, &u16Flags);
2474 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2475 fFlags |= u16Flags;
2476 }
2477
2478 /* Load the bits */
2479 if ( !fHaveBits
2480 && GCPhysLast < UINT32_C(0xe0000000))
2481 {
2482 /*
2483 * Dynamic chunks.
2484 */
2485 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
2486 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2487 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2488 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2489
2490 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2491 {
2492 uint8_t fPresent;
2493 rc = SSMR3GetU8(pSSM, &fPresent);
2494 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2495 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2496 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2497 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2498
2499 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2500 {
2501 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2502 PPGMPAGE pPage = &pRam->aPages[iPage];
2503 if (fPresent)
2504 {
2505 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2506 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2507 rc = pgmR3LoadPageToDevNullOld(pSSM);
2508 else
2509 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2510 }
2511 else
2512 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2513 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2514 }
2515 }
2516 }
2517 else if (pRam->pvR3)
2518 {
2519 /*
2520 * MMIO2.
2521 */
2522 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2523 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2524 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2525 AssertLogRelMsgReturn(pRam->pvR3,
2526 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2527 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2528
2529 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2530 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2531 }
2532 else if (GCPhysLast < UINT32_C(0xfff80000))
2533 {
2534 /*
2535 * PCI MMIO, no pages saved.
2536 */
2537 }
2538 else
2539 {
2540 /*
2541 * Load the 0xfff80000..0xffffffff BIOS range.
2542 * It starts with X reserved pages that we have to skip over since
2543 * the RAMRANGE created by the new code won't include those.
2544 */
2545 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2546 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2547 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2548 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2549 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2550 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2551 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2552
2553 /* Skip wasted reserved pages before the ROM. */
2554 while (GCPhys < pRam->GCPhys)
2555 {
2556 rc = pgmR3LoadPageToDevNullOld(pSSM);
2557 GCPhys += PAGE_SIZE;
2558 }
2559
2560 /* Load the bios pages. */
2561 cPages = pRam->cb >> PAGE_SHIFT;
2562 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2563 {
2564 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
2565 PPGMPAGE pPage = &pRam->aPages[iPage];
2566
2567 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2568 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage),
2569 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2570 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2571 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2572 }
2573 }
2574 }
2575 }
2576
2577 return VINF_SUCCESS;
2578}
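
/*
 * Annotation: a self-contained sketch of the "Dynamic chunks" stream shape
 * consumed above: one presence byte per 1 MB chunk, a present chunk being
 * followed by its raw pages back to back, an absent chunk by nothing (the
 * pages stay zero, which PGMR3Reset guarantees before loading).  FILE* and
 * the demo* names/constants are stand-ins for the real SSM/PGM plumbing.
 */
#if 0 /* illustrative sketch, not built */
# include <stdio.h>
# include <stdint.h>

# define DEMO_PAGE_SIZE       4096u
# define DEMO_PAGES_PER_CHUNK ((1u*1024*1024) / DEMO_PAGE_SIZE) /* 256 */

static int demoLoadChunkedRam(FILE *pStream, uint8_t *pbRam, uint32_t cPages)
{
    for (uint32_t iPage = 0; iPage < cPages; /* advanced by the inner loop */)
    {
        int chPresent = fgetc(pStream);
        if (chPresent != 0 && chPresent != 1)
            return -1;                              /* format error / EOF */
        for (uint32_t i = 0; i < DEMO_PAGES_PER_CHUNK; i++, iPage++)
            if (chPresent)
            {
                uint8_t *pbPage = pbRam + (size_t)iPage * DEMO_PAGE_SIZE;
                if (fread(pbPage, 1, DEMO_PAGE_SIZE, pStream) != DEMO_PAGE_SIZE)
                    return -1;                      /* raw page bits */
            }
            /* else: absent chunk, the pages stay zero */
    }
    return 0;
}
#endif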
2579
2580
2581/**
2582 * Worker for pgmR3Load and pgmR3LoadFinalLocked.
2583 *
2584 * @returns VBox status code.
2585 *
2586 * @param pVM The cross context VM structure.
2587 * @param pSSM The SSM handle.
2588 * @param uVersion The PGM saved state unit version.
2589 * @param uPass The pass number.
2590 *
2591 * @todo This needs splitting up if more record types or code twists are
2592 * added...
2593 */
2594static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2595{
2596 NOREF(uPass);
2597
2598 /*
2599 * Process page records until we hit the terminator.
2600 */
2601 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2602 PPGMRAMRANGE pRamHint = NULL;
2603 uint8_t id = UINT8_MAX;
2604 uint32_t iPage = UINT32_MAX - 10;
2605 PPGMROMRANGE pRom = NULL;
2606 PPGMREGMMIORANGE pRegMmio = NULL;
2607
2608 /*
2609 * We batch up pages that should be freed instead of calling GMM for
2610 * each and every one of them. Note that we'll lose the pages in most
2611 * failure paths - this should probably be addressed one day.
2612 */
2613 uint32_t cPendingPages = 0;
2614 PGMMFREEPAGESREQ pReq;
2615 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2616 AssertLogRelRCReturn(rc, rc);
2617
2618 for (;;)
2619 {
2620 /*
2621 * Get the record type and flags.
2622 */
2623 uint8_t u8;
2624 rc = SSMR3GetU8(pSSM, &u8);
2625 if (RT_FAILURE(rc))
2626 return rc;
2627 if (u8 == PGM_STATE_REC_END)
2628 {
2629 /*
2630 * Finish off any pages pending freeing.
2631 */
2632 if (cPendingPages)
2633 {
2634 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2635 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2636 AssertLogRelRCReturn(rc, rc);
2637 }
2638 GMMR3FreePagesCleanup(pReq);
2639 return VINF_SUCCESS;
2640 }
2641 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2642 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2643 {
2644 /*
2645 * RAM page.
2646 */
2647 case PGM_STATE_REC_RAM_ZERO:
2648 case PGM_STATE_REC_RAM_RAW:
2649 case PGM_STATE_REC_RAM_BALLOONED:
2650 {
2651 /*
2652 * Get the address and resolve it into a page descriptor.
2653 */
2654 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2655 GCPhys += PAGE_SIZE;
2656 else
2657 {
2658 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2659 if (RT_FAILURE(rc))
2660 return rc;
2661 }
2662 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2663
2664 PPGMPAGE pPage;
2665 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2666 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2667
2668 /*
2669 * Take action according to the record type.
2670 */
2671 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2672 {
2673 case PGM_STATE_REC_RAM_ZERO:
2674 {
2675 if (PGM_PAGE_IS_ZERO(pPage))
2676 break;
2677
2678 /* Ballooned pages must be unmarked (live snapshot and
2679 teleportation scenarios). */
2680 if (PGM_PAGE_IS_BALLOONED(pPage))
2681 {
2682 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2683 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2684 break;
2685 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2686 break;
2687 }
2688
2689 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2690
2691 /* If this is a ROM page, we must clear it and not try to
2692 * free it. Ditto if the VM is using RamPreAlloc (see
2693 * @bugref{6318}). */
2694 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2695 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2696 || pVM->pgm.s.fRamPreAlloc)
2697 {
2698 PGMPAGEMAPLOCK PgMpLck;
2699 void *pvDstPage;
2700 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2701 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2702
2703 ASMMemZeroPage(pvDstPage);
2704 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2705 }
2706 /* Free it only if it's not part of a previously
2707 allocated large page (no need to clear the page). */
2708 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2709 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2710 {
2711 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2712 AssertRCReturn(rc, rc);
2713 }
2714 /** @todo handle large pages (see @bugref{5545}) */
2715 break;
2716 }
2717
2718 case PGM_STATE_REC_RAM_BALLOONED:
2719 {
2720 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2721 if (PGM_PAGE_IS_BALLOONED(pPage))
2722 break;
2723
2724 /* We don't map ballooned pages in our shadow page tables, let's
2725 just free it if allocated and mark as ballooned. See @bugref{5515}. */
2726 if (PGM_PAGE_IS_ALLOCATED(pPage))
2727 {
2728 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2729 * @bugref{5545}). */
2730 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2731 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2732 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2733
2734 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2735 AssertRCReturn(rc, rc);
2736 }
2737 Assert(PGM_PAGE_IS_ZERO(pPage));
2738 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2739 break;
2740 }
2741
2742 case PGM_STATE_REC_RAM_RAW:
2743 {
2744 PGMPAGEMAPLOCK PgMpLck;
2745 void *pvDstPage;
2746 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2747 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2748 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2749 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2750 if (RT_FAILURE(rc))
2751 return rc;
2752 break;
2753 }
2754
2755 default:
2756 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2757 }
2758 id = UINT8_MAX;
2759 break;
2760 }
2761
2762 /*
2763 * MMIO2 page.
2764 */
2765 case PGM_STATE_REC_MMIO2_RAW:
2766 case PGM_STATE_REC_MMIO2_ZERO:
2767 {
2768 /*
2769 * Get the ID + page number and resolve that into an MMIO2 page.
2770 */
2771 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2772 iPage++;
2773 else
2774 {
2775 SSMR3GetU8(pSSM, &id);
2776 rc = SSMR3GetU32(pSSM, &iPage);
2777 if (RT_FAILURE(rc))
2778 return rc;
2779 }
2780 if ( !pRegMmio
2781 || pRegMmio->idSavedState != id)
2782 {
2783 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
2784 if ( pRegMmio->idSavedState == id
2785 && (pRegMmio->fFlags & PGMREGMMIORANGE_F_MMIO2))
2786 break;
2787 AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2788 }
2789 AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc), VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2790 void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);
2791
2792 /*
2793 * Load the page bits.
2794 */
2795 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2796 ASMMemZeroPage(pvDstPage);
2797 else
2798 {
2799 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2800 if (RT_FAILURE(rc))
2801 return rc;
2802 }
2803 GCPhys = NIL_RTGCPHYS;
2804 break;
2805 }
2806
2807 /*
2808 * ROM pages.
2809 */
2810 case PGM_STATE_REC_ROM_VIRGIN:
2811 case PGM_STATE_REC_ROM_SHW_RAW:
2812 case PGM_STATE_REC_ROM_SHW_ZERO:
2813 case PGM_STATE_REC_ROM_PROT:
2814 {
2815 /*
2816 * Get the ID + page number and resolve that into a ROM page descriptor.
2817 */
2818 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2819 iPage++;
2820 else
2821 {
2822 SSMR3GetU8(pSSM, &id);
2823 rc = SSMR3GetU32(pSSM, &iPage);
2824 if (RT_FAILURE(rc))
2825 return rc;
2826 }
2827 if ( !pRom
2828 || pRom->idSavedState != id)
2829 {
2830 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2831 if (pRom->idSavedState == id)
2832 break;
2833 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2834 }
2835 AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2836 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2837 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
2838
2839 /*
2840 * Get and set the protection.
2841 */
2842 uint8_t u8Prot;
2843 rc = SSMR3GetU8(pSSM, &u8Prot);
2844 if (RT_FAILURE(rc))
2845 return rc;
2846 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2847 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2848
2849 if (enmProt != pRomPage->enmProt)
2850 {
2851 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2852 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2853 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2854 GCPhys, enmProt, pRom->pszDesc);
2855 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2856 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2857 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2858 }
2859 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2860 break; /* done */
2861
2862 /*
2863 * Get the right page descriptor.
2864 */
2865 PPGMPAGE pRealPage;
2866 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2867 {
2868 case PGM_STATE_REC_ROM_VIRGIN:
2869 if (!PGMROMPROT_IS_ROM(enmProt))
2870 pRealPage = &pRomPage->Virgin;
2871 else
2872 pRealPage = NULL;
2873 break;
2874
2875 case PGM_STATE_REC_ROM_SHW_RAW:
2876 case PGM_STATE_REC_ROM_SHW_ZERO:
2877 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2878 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2879 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
2880 GCPhys, enmProt, pRom->pszDesc);
2881 if (PGMROMPROT_IS_ROM(enmProt))
2882 pRealPage = &pRomPage->Shadow;
2883 else
2884 pRealPage = NULL;
2885 break;
2886
2887 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
2888 }
2889 if (!pRealPage)
2890 {
2891 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
2892 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2893 }
2894
2895 /*
2896 * Make it writable and map it (if necessary).
2897 */
2898 void *pvDstPage = NULL;
2899 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2900 {
2901 case PGM_STATE_REC_ROM_SHW_ZERO:
2902 if ( PGM_PAGE_IS_ZERO(pRealPage)
2903 || PGM_PAGE_IS_BALLOONED(pRealPage))
2904 break;
2905 /** @todo implement zero page replacing. */
2906 RT_FALL_THRU();
2907 case PGM_STATE_REC_ROM_VIRGIN:
2908 case PGM_STATE_REC_ROM_SHW_RAW:
2909 {
2910 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
2911 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2912 break;
2913 }
2914 }
2915
2916 /*
2917 * Load the bits.
2918 */
2919 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2920 {
2921 case PGM_STATE_REC_ROM_SHW_ZERO:
2922 if (pvDstPage)
2923 ASMMemZeroPage(pvDstPage);
2924 break;
2925
2926 case PGM_STATE_REC_ROM_VIRGIN:
2927 case PGM_STATE_REC_ROM_SHW_RAW:
2928 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2929 if (RT_FAILURE(rc))
2930 return rc;
2931 break;
2932 }
2933 GCPhys = NIL_RTGCPHYS;
2934 break;
2935 }
2936
2937 /*
2938 * Unknown type.
2939 */
2940 default:
2941 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2942 }
2943 } /* forever */
2944}
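
/*
 * Annotation: a stripped-down walker for the record stream pgmR3LoadMemory
 * parses above, showing the implicit-address optimization for consecutive
 * RAM pages.  The numeric values of the DEMO_* constants are demo choices
 * (only the record layout is taken from the code above); FILE* stands in
 * for the SSM handle, and the address is read as a plain 64-bit value
 * rather than via SSMR3GetGCPhys.
 */
#if 0 /* illustrative sketch, not built */
# include <stdio.h>
# include <stdint.h>

# define DEMO_REC_RAM_ZERO  0x00
# define DEMO_REC_RAM_RAW   0x01
# define DEMO_REC_END       0xff   /* assumed terminator value */
# define DEMO_REC_FLAG_ADDR 0x80   /* assumed "address follows" flag */
# define DEMO_PAGE_SIZE     4096u

static int demoWalkRamRecords(FILE *pStream)
{
    uint64_t GCPhys = UINT64_MAX; /* like NIL; a well-formed stream opens with an explicit address */
    for (;;)
    {
        int ch = fgetc(pStream);
        if (ch == EOF)
            return -1;
        uint8_t u8 = (uint8_t)ch;
        if (u8 == DEMO_REC_END)
            return 0;                               /* clean terminator */
        if (u8 & DEMO_REC_FLAG_ADDR)
        {
            if (fread(&GCPhys, 1, sizeof(GCPhys), pStream) != sizeof(GCPhys))
                return -1;                          /* explicit address */
        }
        else
            GCPhys += DEMO_PAGE_SIZE;               /* implicit: next page */

        switch (u8 & ~DEMO_REC_FLAG_ADDR)
        {
            case DEMO_REC_RAM_ZERO:
                break;                              /* no payload */
            case DEMO_REC_RAM_RAW:
            {
                uint8_t abPage[DEMO_PAGE_SIZE];
                if (fread(abPage, 1, sizeof(abPage), pStream) != sizeof(abPage))
                    return -1;                      /* raw page payload */
                break;
            }
            default:
                return -1;                          /* not handled in this demo */
        }
    }
}
#endif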
2945
2946
2947/**
2948 * Worker for pgmR3Load.
2949 *
2950 * @returns VBox status code.
2951 *
2952 * @param pVM The cross context VM structure.
2953 * @param pSSM The SSM handle.
2954 * @param uVersion The saved state version.
2955 */
2956static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2957{
2958 PPGM pPGM = &pVM->pgm.s;
2959 int rc;
2960 uint32_t u32Sep;
2961
2962 /*
2963 * Load basic data (required / unaffected by relocation).
2964 */
2965 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2966 {
2967 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
2968 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2969 else
2970 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFieldsPreBalloon[0]);
2971
2972 AssertLogRelRCReturn(rc, rc);
2973
2974 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2975 {
2976 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
2977 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
2978 else
2979 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
2980 AssertLogRelRCReturn(rc, rc);
2981 }
2982 }
2983 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2984 {
2985 AssertRelease(pVM->cCpus == 1);
2986
2987 PGMOLD pgmOld;
2988 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2989 AssertLogRelRCReturn(rc, rc);
2990
2991 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
2992 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
2993 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
2994
2995 PVMCPU pVCpu0 = pVM->apCpusR3[0];
2996 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2997 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2998 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2999 }
3000 else
3001 {
3002 AssertRelease(pVM->cCpus == 1);
3003
3004 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
3005 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
3006 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
3007
3008 uint32_t cbRamSizeIgnored;
3009 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3010 if (RT_FAILURE(rc))
3011 return rc;
3012 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3013 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3014
3015 uint32_t u32 = 0;
3016 SSMR3GetUInt(pSSM, &u32);
3017 pVCpu0->pgm.s.fA20Enabled = !!u32;
3018 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3019 RTUINT uGuestMode;
3020 SSMR3GetUInt(pSSM, &uGuestMode);
3021 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3022
3023 /* check separator. */
3024 rc = SSMR3GetU32(pSSM, &u32Sep);
3025 if (RT_FAILURE(rc))
3026 return rc;
3027 if (u32Sep != (uint32_t)~0)
3028 {
3029 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3030 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3031 }
3032 }
3033
3034 /*
3035 * Fix the A20 mask.
3036 */
3037 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3038 {
3039 PVMCPU pVCpu = pVM->apCpusR3[i];
3040 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3041 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3042 }
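    /* Annotation: with fA20Enabled set, the expression above evaluates to
       ~(RTGCPHYS)0 (no masking); with it clear it becomes ~RT_BIT_64(20),
       i.e. physical address bit 20 is forced to zero just like the legacy
       A20 gate would do. */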
3043
3044 /*
3045 * The guest mappings - skipped now, see re-fixation in the caller.
3046 */
3047 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3048 {
3049 for (uint32_t i = 0; ; i++)
3050 {
3051 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3052 if (RT_FAILURE(rc))
3053 return rc;
3054 if (u32Sep == ~0U)
3055 break;
3056 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3057
3058 char szDesc[256];
3059 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3060 if (RT_FAILURE(rc))
3061 return rc;
3062 RTGCPTR GCPtrIgnore;
3063 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3064 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3065 if (RT_FAILURE(rc))
3066 return rc;
3067 }
3068 }
3069
3070 /*
3071 * Load the RAM contents.
3072 */
3073 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3074 {
3075 if (!pVM->pgm.s.LiveSave.fActive)
3076 {
3077 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3078 {
3079 rc = pgmR3LoadRamConfig(pVM, pSSM);
3080 if (RT_FAILURE(rc))
3081 return rc;
3082 }
3083 rc = pgmR3LoadRomRanges(pVM, pSSM);
3084 if (RT_FAILURE(rc))
3085 return rc;
3086 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3087 if (RT_FAILURE(rc))
3088 return rc;
3089 }
3090
3091 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3092 }
3093 else
3094 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3095
3096 /* Refresh balloon accounting. */
3097 if (pVM->pgm.s.cBalloonedPages)
3098 {
3099 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3100 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3101 AssertRCReturn(rc, rc);
3102 }
3103 return rc;
3104}
3105
3106
3107/**
3108 * @callback_method_impl{FNSSMINTLOADEXEC}
3109 */
3110static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3111{
3112 int rc;
3113
3114 /*
3115 * Validate version.
3116 */
3117 if ( ( uPass != SSM_PASS_FINAL
3118 && uVersion != PGM_SAVED_STATE_VERSION
3119 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3120 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3121 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3122 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3123 || ( uVersion != PGM_SAVED_STATE_VERSION
3124 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3125 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3126 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3127 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3128 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3129 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3130 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3131 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3132 )
3133 {
3134 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3135 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3136 }
3137
3138 /*
3139 * Do the loading while owning the lock because a bunch of the functions
3140 * we're using require this.
3141 */
3142 if (uPass != SSM_PASS_FINAL)
3143 {
3144 pgmLock(pVM);
3145 if (uPass != 0)
3146 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3147 else
3148 {
3149 pVM->pgm.s.LiveSave.fActive = true;
3150 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3151 rc = pgmR3LoadRamConfig(pVM, pSSM);
3152 else
3153 rc = VINF_SUCCESS;
3154 if (RT_SUCCESS(rc))
3155 rc = pgmR3LoadRomRanges(pVM, pSSM);
3156 if (RT_SUCCESS(rc))
3157 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3158 if (RT_SUCCESS(rc))
3159 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3160 }
3161 pgmUnlock(pVM);
3162 }
3163 else
3164 {
3165 pgmLock(pVM);
3166 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3167 pVM->pgm.s.LiveSave.fActive = false;
3168 pgmUnlock(pVM);
3169 if (RT_SUCCESS(rc))
3170 {
3171 /*
3172 * We require a full resync now.
3173 */
3174 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3175 {
3176 PVMCPU pVCpu = pVM->apCpusR3[i];
3177 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3178 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3179 /** @todo For guest PAE, we might get the wrong
3180 * aGCPhysGstPaePDs values now. We should use the
3181 * saved ones... Postponing this since it's nothing new
3182 * and PAE/PDPTR needs some general readjusting, see
3183 * @bugref{5880}. */
3184 }
3185
3186 pgmR3HandlerPhysicalUpdateAll(pVM);
3187
3188 /*
3189 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3190 * (Requires the CPUM state to be restored already!)
3191 */
3192 if (CPUMR3IsStateRestorePending(pVM))
3193 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3194 N_("PGM was unexpectedly restored before CPUM"));
3195
3196 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3197 {
3198 PVMCPU pVCpu = pVM->apCpusR3[i];
3199
3200 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
3201 AssertLogRelRCReturn(rc, rc);
3202
3203 /* Update the PSE, NX flags and validity masks. */
3204 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3205 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3206 }
3207
3208 /*
3209 * Try to re-fixate the guest mappings.
3210 */
3211 pVM->pgm.s.fMappingsFixedRestored = false;
3212 if ( pVM->pgm.s.fMappingsFixed
3213 && pgmMapAreMappingsEnabled(pVM))
3214 {
3215#ifndef PGM_WITHOUT_MAPPINGS
3216 RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed;
3217 uint32_t cbFixed = pVM->pgm.s.cbMappingFixed;
3218 pVM->pgm.s.fMappingsFixed = false;
3219
3220 uint32_t cbRequired;
3221 int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);
3222 if ( RT_SUCCESS(rc2)
3223 && cbRequired > cbFixed)
3224 rc2 = VERR_OUT_OF_RANGE;
3225 if (RT_SUCCESS(rc2))
3226 rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);
3227 if (RT_FAILURE(rc2))
3228 {
3229 LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",
3230 GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));
3231 pVM->pgm.s.fMappingsFixed = false;
3232 pVM->pgm.s.fMappingsFixedRestored = true;
3233 pVM->pgm.s.GCPtrMappingFixed = GCPtrFixed;
3234 pVM->pgm.s.cbMappingFixed = cbFixed;
3235 }
3236#else
3237 AssertFailed();
3238#endif
3239 }
3240 else
3241 {
3242 /* We used to set fixed + disabled while we only use disabled now,
3243 so wipe the state to avoid any confusion. */
3244 pVM->pgm.s.fMappingsFixed = false;
3245 pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;
3246 pVM->pgm.s.cbMappingFixed = 0;
3247 }
3248
3249 /*
3250 * If we have floating mappings, do a CR3 sync now to make sure the HMA
3251 * doesn't conflict with guest code / data and thereby cause trouble
3252 * when restoring other components like PATM.
3253 */
3254 if (pgmMapAreMappingsFloating(pVM))
3255 {
3256 PVMCPU pVCpu = pVM->apCpusR3[0];
3257 rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
3258 if (RT_FAILURE(rc))
3259 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3260 N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);
3261
3262 /* Make sure to re-sync before executing code. */
3263 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3264 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3265 }
3266 }
3267 }
3268
3269 return rc;
3270}
3271
3272
3273/**
3274 * @callback_method_impl{FNSSMINTLOADDONE}
3275 */
3276static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3277{
3278 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3279 NOREF(pSSM);
3280 return VINF_SUCCESS;
3281}
3282
3283
3284/**
3285 * Registers the saved state callbacks with SSM.
3286 *
3287 * @returns VBox status code.
3288 * @param pVM The cross context VM structure.
3289 * @param cbRam The RAM size.
3290 */
3291int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3292{
3293 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3294 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3295 NULL, pgmR3SaveExec, pgmR3SaveDone,
3296 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3297}
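
/*
 * Annotation: the callbacks registered above are driven by SSM roughly in
 * this order for a live save/migration (inferred from the code in this
 * file, not from separate SSM documentation):
 *
 *   pgmR3LivePrep                  - allocate tracking data, engage write monitoring
 *   pgmR3LiveExec(uPass=0,1,...)   - scan and send dirty pages, pass 0 also
 *                                    saving the RAM/ROM/MMIO2 config
 *   pgmR3LiveVote(uPass)           - after each pass; VINF_SUCCESS ends the loop
 *   pgmR3SaveExec(SSM_PASS_FINAL)  - final catch-up pass with the VM stopped
 *   pgmR3SaveDone                  - free tracking data, disengage monitoring
 *
 * A non-live snapshot skips the three Live* callbacks, and pgmR3SaveExec
 * then writes the config and all the pages itself.
 */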
3298