VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@ 96335

Last change on this file since 96335 was 95572, checked in by vboxsync, 2 years ago

VMM/PGM: Fixed issues saving and restoring inactive shadow ROM pages when in NEM mode. [correction] bugref:10122

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 125.3 KB
Line 
1/* $Id: PGMSavedState.cpp 95572 2022-07-08 15:02:21Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/ssm.h>
27#include <VBox/vmm/pdmdrv.h>
28#include <VBox/vmm/pdmdev.h>
29#include "PGMInternal.h"
30#include <VBox/vmm/vmcc.h>
31#include "PGMInline.h"
32
33#include <VBox/param.h>
34#include <VBox/err.h>
35
36#include <iprt/asm.h>
37#include <iprt/assert.h>
38#include <iprt/crc.h>
39#include <iprt/mem.h>
40#include <iprt/sha.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44
45/*********************************************************************************************************************************
46* Defined Constants And Macros *
47*********************************************************************************************************************************/
48/** Saved state data unit version. */
49#define PGM_SAVED_STATE_VERSION 14
50/** Saved state data unit version before the PAE PDPE registers. */
51#define PGM_SAVED_STATE_VERSION_PRE_PAE 13
52/** Saved state data unit version after this includes ballooned page flags in
53 * the state (see @bugref{5515}). */
54#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
55/** Saved state before the balloon change. */
56#define PGM_SAVED_STATE_VERSION_PRE_BALLOON 11
57/** Saved state data unit version used during 3.1 development, misses the RAM
58 * config. */
59#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG 10
60/** Saved state data unit version for 3.0 (pre teleportation). */
61#define PGM_SAVED_STATE_VERSION_3_0_0 9
62/** Saved state data unit version for 2.2.2 and later. */
63#define PGM_SAVED_STATE_VERSION_2_2_2 8
64/** Saved state data unit version for 2.2.0. */
65#define PGM_SAVED_STATE_VERSION_RR_DESC 7
66/** Saved state data unit version. */
67#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
68
69
70/** @name Sparse state record types
71 * @{ */
72/** Zero page. No data. */
73#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
74/** Raw page. */
75#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
76/** Raw MMIO2 page. */
77#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
78/** Zero MMIO2 page. */
79#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
80/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
81#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
82/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
83#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
84/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
85#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
86/** ROM protection (8-bit). */
87#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
88/** Ballooned page. No data. */
89#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
90/** The last record type. */
91#define PGM_STATE_REC_LAST PGM_STATE_REC_RAM_BALLOONED
92/** End marker. */
93#define PGM_STATE_REC_END UINT8_C(0xff)
94/** Flag indicating that the data is preceded by the page address.
95 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is a 8-bit
96 * range ID and a 32-bit page index.
97 */
98#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
99/** @} */
100
101/** The CRC-32 for a zero page. */
102#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
103/** The CRC-32 for a zero half page. */
104#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
105
106
107
108/** @name Old Page types used in older saved states.
109 * @{ */
110/** Old saved state: The usual invalid zero entry. */
111#define PGMPAGETYPE_OLD_INVALID 0
112/** Old saved state: RAM page. (RWX) */
113#define PGMPAGETYPE_OLD_RAM 1
114/** Old saved state: MMIO2 page. (RWX) */
115#define PGMPAGETYPE_OLD_MMIO2 1
116/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
117 * See PGMHandlerPhysicalPageAlias(). */
118#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO 2
119/** Old saved state: Shadowed ROM. (RWX) */
120#define PGMPAGETYPE_OLD_ROM_SHADOW 3
121/** Old saved state: ROM page. (R-X) */
122#define PGMPAGETYPE_OLD_ROM 4
123/** Old saved state: MMIO page. (---) */
124#define PGMPAGETYPE_OLD_MMIO 5
125/** @} */
126
127
128/*********************************************************************************************************************************
129* Structures and Typedefs *
130*********************************************************************************************************************************/
131/** For loading old saved states. (pre-smp) */
132typedef struct
133{
134 /** If set no conflict checks are required. (boolean) */
135 bool fMappingsFixed;
136 /** Size of fixed mapping */
137 uint32_t cbMappingFixed;
138 /** Base address (GC) of fixed mapping */
139 RTGCPTR GCPtrMappingFixed;
140 /** A20 gate mask.
141 * Our current approach to A20 emulation is to let REM do it and don't bother
142 * anywhere else. The interesting guests will be operating with it enabled anyway.
143 * But should the need arise, we'll subject physical addresses to this mask. */
144 RTGCPHYS GCPhysA20Mask;
145 /** A20 gate state - boolean! */
146 bool fA20Enabled;
147 /** The guest paging mode. */
148 PGMMODE enmGuestMode;
149} PGMOLD;
150
151
152/*********************************************************************************************************************************
153* Global Variables *
154*********************************************************************************************************************************/
155/** PGM fields to save/load. */
156
157static const SSMFIELD s_aPGMFields[] =
158{
159 SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)),
160 SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed),
161 SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)),
162 SSMFIELD_ENTRY( PGM, cBalloonedPages),
163 SSMFIELD_ENTRY_TERM()
164};
165
166static const SSMFIELD s_aPGMFieldsPreBalloon[] =
167{
168 SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)),
169 SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed),
170 SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)),
171 SSMFIELD_ENTRY_TERM()
172};
173
174static const SSMFIELD s_aPGMCpuFields[] =
175{
176 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
177 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
178 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
179 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[0]),
180 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[1]),
181 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[2]),
182 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[3]),
183 SSMFIELD_ENTRY_TERM()
184};
185
186static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
187{
188 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
189 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
190 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
191 SSMFIELD_ENTRY_TERM()
192};
193
194static const SSMFIELD s_aPGMFields_Old[] =
195{
196 SSMFIELD_ENTRY( PGMOLD, fMappingsFixed),
197 SSMFIELD_ENTRY_GCPTR( PGMOLD, GCPtrMappingFixed),
198 SSMFIELD_ENTRY( PGMOLD, cbMappingFixed),
199 SSMFIELD_ENTRY( PGMOLD, fA20Enabled),
200 SSMFIELD_ENTRY_GCPHYS( PGMOLD, GCPhysA20Mask),
201 SSMFIELD_ENTRY( PGMOLD, enmGuestMode),
202 SSMFIELD_ENTRY_TERM()
203};
204
205
206/**
207 * Find the ROM tracking structure for the given page.
208 *
209 * @returns Pointer to the ROM page structure. NULL if the caller didn't check
210 * that it's a ROM page.
211 * @param pVM The cross context VM structure.
212 * @param GCPhys The address of the ROM page.
213 */
214static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
215{
216 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
217 pRomRange;
218 pRomRange = pRomRange->CTX_SUFF(pNext))
219 {
220 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
221 if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
222 return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT];
223 }
224 return NULL;
225}
226
227
228/**
229 * Prepares the ROM pages for a live save.
230 *
231 * @returns VBox status code.
232 * @param pVM The cross context VM structure.
233 */
234static int pgmR3PrepRomPages(PVM pVM)
235{
236 /*
237 * Initialize the live save tracking in the ROM page descriptors.
238 */
239 PGM_LOCK_VOID(pVM);
240 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
241 {
242 PPGMRAMRANGE pRamHint = NULL;;
243 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
244
245 for (uint32_t iPage = 0; iPage < cPages; iPage++)
246 {
247 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
248 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
249 pRom->aPages[iPage].LiveSave.fDirty = true;
250 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
251 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
252 {
253 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
254 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
255 else
256 {
257 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
258 PPGMPAGE pPage;
259 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
260 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
261 if (RT_SUCCESS(rc))
262 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
263 else
264 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
265 }
266 }
267 }
268
269 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
270 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
271 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
272 }
273 PGM_UNLOCK(pVM);
274
275 return VINF_SUCCESS;
276}
277
278
279/**
280 * Assigns IDs to the ROM ranges and saves them.
281 *
282 * @returns VBox status code.
283 * @param pVM The cross context VM structure.
284 * @param pSSM Saved state handle.
285 */
286static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
287{
288 PGM_LOCK_VOID(pVM);
289 uint8_t id = 1;
290 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
291 {
292 pRom->idSavedState = id;
293 SSMR3PutU8(pSSM, id);
294 SSMR3PutStrZ(pSSM, ""); /* device name */
295 SSMR3PutU32(pSSM, 0); /* device instance */
296 SSMR3PutU8(pSSM, 0); /* region */
297 SSMR3PutStrZ(pSSM, pRom->pszDesc);
298 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
299 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
300 if (RT_FAILURE(rc))
301 break;
302 }
303 PGM_UNLOCK(pVM);
304 return SSMR3PutU8(pSSM, UINT8_MAX);
305}
306
307
/**
 * Loads the ROM range ID assignments.
 *
 * Reads the range records written by pgmR3SaveRomRanges, matches each one
 * against the current configuration by description string, and stores the
 * saved-state ID in the range so page records can reference it later.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Invalidate all IDs first so we can tell which ranges got matched. */
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        pRom->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /* End marker reached: complain about configured ranges that were
               not present in the saved state (unless explicitly optional). */
            for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                if (pRom->idSavedState != UINT8_MAX)
                { /* likely */ }
                else if (pRom->fFlags & PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE)
                    LogRel(("PGM: The '%s' ROM was not found in the saved state, but it is marked as maybe-missing, so that's probably okay.\n",
                            pRom->pszDesc));
                else
                    AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                    ("The '%s' ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                     pRom->pszDesc));
            return VINF_SUCCESS; /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb); /* last get in the batch; its status covers the ones above too */
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        /* ROM records are always written with empty device/instance/region
           (see pgmR3SaveRomRanges); anything else means a format change. */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp %s\n", GCPhys, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        PPGMROMRANGE pRom;
        for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        {
            /* Match by description; skip ranges already claimed by an earlier record. */
            if (   pRom->idSavedState == UINT8_MAX
                && !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (!pRom)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
    } /* forever */
}
392
393
394/**
395 * Scan ROM pages.
396 *
397 * @param pVM The cross context VM structure.
398 */
399static void pgmR3ScanRomPages(PVM pVM)
400{
401 /*
402 * The shadow ROMs.
403 */
404 PGM_LOCK_VOID(pVM);
405 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
406 {
407 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
408 {
409 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
410 for (uint32_t iPage = 0; iPage < cPages; iPage++)
411 {
412 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
413 if (pRomPage->LiveSave.fWrittenTo)
414 {
415 pRomPage->LiveSave.fWrittenTo = false;
416 if (!pRomPage->LiveSave.fDirty)
417 {
418 pRomPage->LiveSave.fDirty = true;
419 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
420 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
421 }
422 pRomPage->LiveSave.fDirtiedRecently = true;
423 }
424 else
425 pRomPage->LiveSave.fDirtiedRecently = false;
426 }
427 }
428 }
429 PGM_UNLOCK(pVM);
430}
431
432
/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    /* NOTE(review): the PGM lock is dropped while writing each page to the
       SSM stream and re-taken afterwards; the ASSUMES above is what makes
       continuing the range iteration safe across that window. */
    PGM_LOCK_VOID(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor.  When the ROM is mapped (not
               the shadow), the virgin copy is the live page in the RAM range;
               otherwise it is parked in the ROM page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(pVM, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[GUEST_PAGE_SIZE];
            if (   !PGM_PAGE_IS_ZERO(pPage)
                && !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                /* In NEM mode the inactive copy lives in the alternate backing. */
                if (!PGMROMPROT_IS_ROM(enmProt) && pVM->pgm.s.fNemMode)
                    pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                else
#endif
                    rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
            }
            else
                RT_ZERO(abPage); /* zero/ballooned pages are saved as all zeros */
            PGM_UNLOCK(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it.  Only the first page of each range carries the explicit
               address (range id + page index); the rest are implicitly sequential. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state. */
            PGM_LOCK_VOID(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
511
512
/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    PGM_LOCK_VOID(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
            uint32_t iPrevPage = cPages; /* "none yet"; used to elide the address on consecutive records */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                /* Save a page when not a live save (save everything), or when
                   it is dirty and has quiesced (not dirtied recently nor
                   written to), or unconditionally in the final pass. */
                if (  !fLiveSave
                    || (   pRomPage->LiveSave.fDirty
                        && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                && !pRomPage->LiveSave.fWrittenTo)
                            || fFinalPass
                           )
                       )
                   )
                {
                    uint8_t abPage[GUEST_PAGE_SIZE];
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    /* When the ROM is mapped, the shadow copy is parked in the
                       descriptor; when the shadow is mapped, it's the live page. */
                    PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
                    bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
                    int rc = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                        /* In NEM mode the inactive shadow copy lives in the alternate backing. */
                        if (PGMROMPROT_IS_ROM(enmProt) && pVM->pgm.s.fNemMode)
                            pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                        else
#endif
                            rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
                    }
                    /* Update the accounting while still holding the lock; the
                       SSM writes below are done with the lock dropped. */
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    PGM_UNLOCK(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    /* Records directly following the previously saved page omit the address. */
                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    PGM_UNLOCK(pVM);

                    /* Same address-elision scheme as above, protection-only record. */
                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
628
629
630/**
631 * Cleans up ROM pages after a live save.
632 *
633 * @param pVM The cross context VM structure.
634 */
635static void pgmR3DoneRomPages(PVM pVM)
636{
637 NOREF(pVM);
638}
639
640
/**
 * Prepares the MMIO2 pages for a live save.
 *
 * Allocates and initializes a PGMLIVESAVEMMIO2PAGE tracking entry for every
 * page in every MMIO2 range; each page starts out as a dirty zero page.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    PGM_LOCK_VOID(pVM);
    for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
    {
        uint32_t const cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
        /* Drop the lock around the allocation; the ASSUME above keeps the
           list iteration safe.  (On failure we return unlocked - correct.) */
        PGM_UNLOCK(pVM);

        PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
                                                                                sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
        if (!paLSPages)
            return VERR_NO_MEMORY;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            /* Initialize it as a dirty zero page. */
            paLSPages[iPage].fDirty          = true;
            paLSPages[iPage].cUnchangedScans = 0;
            paLSPages[iPage].fZero           = true;
            paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
        }

        PGM_LOCK_VOID(pVM);
        pRegMmio->paLSPages = paLSPages;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
680
681
682/**
683 * Assigns IDs to the MMIO2 ranges and saves them.
684 *
685 * @returns VBox status code.
686 * @param pVM The cross context VM structure.
687 * @param pSSM Saved state handle.
688 */
689static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
690{
691 PGM_LOCK_VOID(pVM);
692 uint8_t id = 1;
693 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
694 {
695 pRegMmio->idSavedState = id;
696 SSMR3PutU8(pSSM, id);
697 SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
698 SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
699 SSMR3PutU8(pSSM, pRegMmio->iRegion);
700 SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
701 int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
702 if (RT_FAILURE(rc))
703 break;
704 id++;
705 }
706 PGM_UNLOCK(pVM);
707 return SSMR3PutU8(pSSM, UINT8_MAX);
708}
709
710
711/**
712 * Loads the MMIO2 range ID assignments.
713 *
714 * @returns VBox status code.
715 *
716 * @param pVM The cross context VM structure.
717 * @param pSSM The saved state handle.
718 */
719static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
720{
721 PGM_LOCK_ASSERT_OWNER(pVM);
722
723 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
724 pRegMmio->idSavedState = UINT8_MAX;
725
726 for (;;)
727 {
728 /*
729 * Read the data.
730 */
731 uint8_t id;
732 int rc = SSMR3GetU8(pSSM, &id);
733 if (RT_FAILURE(rc))
734 return rc;
735 if (id == UINT8_MAX)
736 {
737 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
738 AssertLogRelMsg(pRegMmio->idSavedState != UINT8_MAX, ("%s\n", pRegMmio->RamRange.pszDesc));
739 return VINF_SUCCESS; /* the end */
740 }
741 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
742
743 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
744 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
745 AssertLogRelRCReturn(rc, rc);
746
747 uint32_t uInstance;
748 SSMR3GetU32(pSSM, &uInstance);
749 uint8_t iRegion;
750 SSMR3GetU8(pSSM, &iRegion);
751
752 char szDesc[64];
753 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
754 AssertLogRelRCReturn(rc, rc);
755
756 RTGCPHYS cb;
757 rc = SSMR3GetGCPhys(pSSM, &cb);
758 AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
759
760 /*
761 * Locate a matching MMIO2 range.
762 */
763 PPGMREGMMIO2RANGE pRegMmio;
764 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
765 {
766 if ( pRegMmio->idSavedState == UINT8_MAX
767 && pRegMmio->iRegion == iRegion
768 && pRegMmio->pDevInsR3->iInstance == uInstance
769 && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
770 {
771 pRegMmio->idSavedState = id;
772 break;
773 }
774 }
775 if (!pRegMmio)
776 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
777 szDesc, szDevName, uInstance, iRegion);
778
779 /*
780 * Validate the configuration, the size of the MMIO2 region should be
781 * the same.
782 */
783 if (cb != pRegMmio->RamRange.cb)
784 {
785 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
786 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
787 if (cb > pRegMmio->RamRange.cb) /* bad idea? */
788 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
789 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
790 }
791 } /* forever */
792}
793
794
/**
 * Scans one MMIO2 page.
 *
 * Uses two half-page CRC-32s to cheaply detect modifications: on each scan
 * only the first half is checksummed; the second half is only checked when
 * the first half matched (so a change in the second half is detected one
 * scan later).  Pages that stay unchanged accumulate cUnchangedScans, which
 * the save code uses to pick quiesced pages.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pbPage      The page bits.
 * @param   pLSPage     The live save tracking structure for the page.
 *
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        /* No longer zero; seed the first-half CRC and fall through to the
           dirty path (the second-half CRC is caught up on the next scan). */
        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + GUEST_PAGE_SIZE / 2, GUEST_PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            /* If the first half hashes like a zero half-page, do the full
               check and possibly flip the page back to the zero state. */
            if (   u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                && ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path - update the global counters on the clean->dirty transition only. */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}
868
869
870/**
871 * Scan for MMIO2 page modifications.
872 *
873 * @param pVM The cross context VM structure.
874 * @param uPass The pass number.
875 */
876static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
877{
878 /*
879 * Since this is a bit expensive we lower the scan rate after a little while.
880 */
881 if ( ( (uPass & 3) != 0
882 && uPass > 10)
883 || uPass == SSM_PASS_FINAL)
884 return;
885
886 PGM_LOCK_VOID(pVM); /* paranoia */
887 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
888 {
889 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
890 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
891 PGM_UNLOCK(pVM);
892
893 for (uint32_t iPage = 0; iPage < cPages; iPage++)
894 {
895 uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * GUEST_PAGE_SIZE;
896 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
897 }
898
899 PGM_LOCK_VOID(pVM);
900 }
901 PGM_UNLOCK(pVM);
902
903}
904
905
/**
 * Save quiescent MMIO2 pages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round: save everything that is still dirty (or, for a
         * non-live save, everything full stop).
         */
        PGM_LOCK_VOID(pVM);
        for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
             pRegMmio && RT_SUCCESS(rc);
             pRegMmio = pRegMmio->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
            uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
            uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
            uint32_t iPageLast = cPages; /* "none yet"; used to elide the address on consecutive records */
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZero(pbPage, GUEST_PAGE_SIZE) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try to figure out if it's a clean page; compare the SHA-1 to be really sure. */
                    if (   !paLSPages[iPage].fDirty
                        && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, GUEST_PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                }

                /* Records directly following the previously saved page omit the address. */
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        PGM_UNLOCK(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        PGM_LOCK_VOID(pVM);
        for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
             pRegMmio && RT_SUCCESS(rc);
             pRegMmio = pRegMmio->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
            uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
            uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
            uint32_t iPageLast = cPages;
            PGM_UNLOCK(pVM); /* the SSM writes below are done with the lock dropped */

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                /* Skip clean pages and pages which haven't quiesced. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it.  Snapshot the bits and remember their SHA-1 so the
                   final pass can skip the page if it is still unchanged. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t abPage[GUEST_PAGE_SIZE];
                if (!fZero)
                {
                    memcpy(abPage, pbPage, GUEST_PAGE_SIZE);
                    RTSha1(abPage, GUEST_PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                    pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
                iPageLast = iPage;
            }

            PGM_LOCK_VOID(pVM);
        }
        PGM_UNLOCK(pVM);
    }

    return rc;
}
1044
1045
1046/**
1047 * Cleans up MMIO2 pages after a live save.
1048 *
1049 * @param pVM The cross context VM structure.
1050 */
1051static void pgmR3DoneMmio2Pages(PVM pVM)
1052{
1053 /*
1054 * Free the tracking structures for the MMIO2 pages.
1055 * We do the freeing outside the lock in case the VM is running.
1056 */
1057 PGM_LOCK_VOID(pVM);
1058 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
1059 {
1060 void *pvMmio2ToFree = pRegMmio->paLSPages;
1061 if (pvMmio2ToFree)
1062 {
1063 pRegMmio->paLSPages = NULL;
1064 PGM_UNLOCK(pVM);
1065 MMR3HeapFree(pvMmio2ToFree);
1066 PGM_LOCK_VOID(pVM);
1067 }
1068 }
1069 PGM_UNLOCK(pVM);
1070}
1071
1072
1073/**
1074 * Prepares the RAM pages for a live save.
1075 *
1076 * @returns VBox status code.
1077 * @param pVM The cross context VM structure.
1078 */
1079static int pgmR3PrepRamPages(PVM pVM)
1080{
1081
1082 /*
1083 * Try allocating tracking structures for the ram ranges.
1084 *
1085 * To avoid lock contention, we leave the lock every time we're allocating
1086 * a new array. This means we'll have to ditch the allocation and start
1087 * all over again if the RAM range list changes in-between.
1088 *
1089 * Note! pgmR3SaveDone will always be called and it is therefore responsible
1090 * for cleaning up.
1091 */
1092 PPGMRAMRANGE pCur;
1093 PGM_LOCK_VOID(pVM);
1094 do
1095 {
1096 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1097 {
1098 if ( !pCur->paLSPages
1099 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1100 {
1101 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1102 uint32_t const cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1103 PGM_UNLOCK(pVM);
1104 PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
1105 if (!paLSPages)
1106 return VERR_NO_MEMORY;
1107 PGM_LOCK_VOID(pVM);
1108 if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1109 {
1110 PGM_UNLOCK(pVM);
1111 MMR3HeapFree(paLSPages);
1112 PGM_LOCK_VOID(pVM);
1113 break; /* try again */
1114 }
1115 pCur->paLSPages = paLSPages;
1116
1117 /*
1118 * Initialize the array.
1119 */
1120 uint32_t iPage = cPages;
1121 while (iPage-- > 0)
1122 {
1123 /** @todo yield critsect! (after moving this away from EMT0) */
1124 PCPGMPAGE pPage = &pCur->aPages[iPage];
1125 paLSPages[iPage].cDirtied = 0;
1126 paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
1127 paLSPages[iPage].fWriteMonitored = 0;
1128 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1129 paLSPages[iPage].u2Reserved = 0;
1130 switch (PGM_PAGE_GET_TYPE(pPage))
1131 {
1132 case PGMPAGETYPE_RAM:
1133 if ( PGM_PAGE_IS_ZERO(pPage)
1134 || PGM_PAGE_IS_BALLOONED(pPage))
1135 {
1136 paLSPages[iPage].fZero = 1;
1137 paLSPages[iPage].fShared = 0;
1138#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1139 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1140#endif
1141 }
1142 else if (PGM_PAGE_IS_SHARED(pPage))
1143 {
1144 paLSPages[iPage].fZero = 0;
1145 paLSPages[iPage].fShared = 1;
1146#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1147 paLSPages[iPage].u32Crc = UINT32_MAX;
1148#endif
1149 }
1150 else
1151 {
1152 paLSPages[iPage].fZero = 0;
1153 paLSPages[iPage].fShared = 0;
1154#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1155 paLSPages[iPage].u32Crc = UINT32_MAX;
1156#endif
1157 }
1158 paLSPages[iPage].fIgnore = 0;
1159 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1160 break;
1161
1162 case PGMPAGETYPE_ROM_SHADOW:
1163 case PGMPAGETYPE_ROM:
1164 {
1165 paLSPages[iPage].fZero = 0;
1166 paLSPages[iPage].fShared = 0;
1167 paLSPages[iPage].fDirty = 0;
1168 paLSPages[iPage].fIgnore = 1;
1169#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1170 paLSPages[iPage].u32Crc = UINT32_MAX;
1171#endif
1172 pVM->pgm.s.LiveSave.cIgnoredPages++;
1173 break;
1174 }
1175
1176 default:
1177 AssertMsgFailed(("%R[pgmpage]", pPage));
1178 RT_FALL_THRU();
1179 case PGMPAGETYPE_MMIO2:
1180 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1181 paLSPages[iPage].fZero = 0;
1182 paLSPages[iPage].fShared = 0;
1183 paLSPages[iPage].fDirty = 0;
1184 paLSPages[iPage].fIgnore = 1;
1185#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1186 paLSPages[iPage].u32Crc = UINT32_MAX;
1187#endif
1188 pVM->pgm.s.LiveSave.cIgnoredPages++;
1189 break;
1190
1191 case PGMPAGETYPE_MMIO:
1192 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
1193 paLSPages[iPage].fZero = 0;
1194 paLSPages[iPage].fShared = 0;
1195 paLSPages[iPage].fDirty = 0;
1196 paLSPages[iPage].fIgnore = 1;
1197#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1198 paLSPages[iPage].u32Crc = UINT32_MAX;
1199#endif
1200 pVM->pgm.s.LiveSave.cIgnoredPages++;
1201 break;
1202 }
1203 }
1204 }
1205 }
1206 } while (pCur);
1207 PGM_UNLOCK(pVM);
1208
1209 return VINF_SUCCESS;
1210}
1211
1212
1213/**
1214 * Saves the RAM configuration.
1215 *
1216 * @returns VBox status code.
1217 * @param pVM The cross context VM structure.
1218 * @param pSSM The saved state handle.
1219 */
1220static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
1221{
1222 uint32_t cbRamHole = 0;
1223 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
1224 AssertRCReturn(rc, rc);
1225
1226 uint64_t cbRam = 0;
1227 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
1228 AssertRCReturn(rc, rc);
1229
1230 SSMR3PutU32(pSSM, cbRamHole);
1231 return SSMR3PutU64(pSSM, cbRam);
1232}
1233
1234
1235/**
1236 * Loads and verifies the RAM configuration.
1237 *
1238 * @returns VBox status code.
1239 * @param pVM The cross context VM structure.
1240 * @param pSSM The saved state handle.
1241 */
1242static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
1243{
1244 uint32_t cbRamHoleCfg = 0;
1245 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
1246 AssertRCReturn(rc, rc);
1247
1248 uint64_t cbRamCfg = 0;
1249 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
1250 AssertRCReturn(rc, rc);
1251
1252 uint32_t cbRamHoleSaved;
1253 SSMR3GetU32(pSSM, &cbRamHoleSaved);
1254
1255 uint64_t cbRamSaved;
1256 rc = SSMR3GetU64(pSSM, &cbRamSaved);
1257 AssertRCReturn(rc, rc);
1258
1259 if ( cbRamHoleCfg != cbRamHoleSaved
1260 || cbRamCfg != cbRamSaved)
1261 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
1262 cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
1263 return VINF_SUCCESS;
1264}
1265
1266#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1267
1268/**
1269 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1270 * info with it.
1271 *
1272 * @param pVM The cross context VM structure.
1273 * @param pCur The current RAM range.
1274 * @param paLSPages The current array of live save page tracking
1275 * structures.
1276 * @param iPage The page index.
1277 */
1278static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1279{
1280 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1281 PGMPAGEMAPLOCK PgMpLck;
1282 void const *pvPage;
1283 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1284 if (RT_SUCCESS(rc))
1285 {
1286 paLSPages[iPage].u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
1287 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1288 }
1289 else
1290 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1291}
1292
1293
1294/**
1295 * Verifies the CRC-32 for a page given it's raw bits.
1296 *
1297 * @param pvPage The page bits.
1298 * @param pCur The current RAM range.
1299 * @param paLSPages The current array of live save page tracking
1300 * structures.
1301 * @param iPage The page index.
1302 */
1303static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1304{
1305 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1306 {
1307 uint32_t u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
1308 Assert( ( !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
1309 && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
1310 || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
1311 AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
1312 ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
1313 pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
1314 }
1315}
1316
1317
1318/**
1319 * Verifies the CRC-32 for a RAM page.
1320 *
1321 * @param pVM The cross context VM structure.
1322 * @param pCur The current RAM range.
1323 * @param paLSPages The current array of live save page tracking
1324 * structures.
1325 * @param iPage The page index.
1326 */
1327static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1328{
1329 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1330 {
1331 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1332 PGMPAGEMAPLOCK PgMpLck;
1333 void const *pvPage;
1334 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1335 if (RT_SUCCESS(rc))
1336 {
1337 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
1338 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1339 }
1340 }
1341}
1342
1343#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1344
1345/**
1346 * Scan for RAM page modifications and reprotect them.
1347 *
1348 * @param pVM The cross context VM structure.
1349 * @param fFinalPass Whether this is the final pass or not.
1350 */
1351static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
1352{
1353 /*
1354 * The RAM.
1355 */
1356 RTGCPHYS GCPhysCur = 0;
1357 PPGMRAMRANGE pCur;
1358 PGM_LOCK_VOID(pVM);
1359 do
1360 {
1361 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1362 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1363 {
1364 if ( pCur->GCPhysLast > GCPhysCur
1365 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1366 {
1367 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1368 uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1369 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
1370 GCPhysCur = 0;
1371 for (; iPage < cPages; iPage++)
1372 {
1373 /* Do yield first. */
1374 if ( !fFinalPass
1375#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
1376 && (iPage & 0x7ff) == 0x100
1377#endif
1378 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1379 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1380 {
1381 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1382 break; /* restart */
1383 }
1384
1385 /* Skip already ignored pages. */
1386 if (paLSPages[iPage].fIgnore)
1387 continue;
1388
1389 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1390 {
1391 /*
1392 * A RAM page.
1393 */
1394 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1395 {
1396 case PGM_PAGE_STATE_ALLOCATED:
1397 /** @todo Optimize this: Don't always re-enable write
1398 * monitoring if the page is known to be very busy. */
1399 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1400 {
1401 AssertMsg(paLSPages[iPage].fWriteMonitored,
1402 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
1403 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1404 Assert(pVM->pgm.s.cWrittenToPages > 0);
1405 pVM->pgm.s.cWrittenToPages--;
1406 }
1407 else
1408 {
1409 AssertMsg(!paLSPages[iPage].fWriteMonitored,
1410 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
1411 pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
1412 }
1413
1414 if (!paLSPages[iPage].fDirty)
1415 {
1416 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1417 if (paLSPages[iPage].fZero)
1418 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1419 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1420 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1421 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1422 }
1423
1424 pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
1425 pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
1426 paLSPages[iPage].fWriteMonitored = 1;
1427 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1428 paLSPages[iPage].fDirty = 1;
1429 paLSPages[iPage].fZero = 0;
1430 paLSPages[iPage].fShared = 0;
1431#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1432 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1433#endif
1434 break;
1435
1436 case PGM_PAGE_STATE_WRITE_MONITORED:
1437 Assert(paLSPages[iPage].fWriteMonitored);
1438 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1439 {
1440#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1441 if (paLSPages[iPage].fWriteMonitoredJustNow)
1442 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1443 else
1444 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
1445#endif
1446 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1447 }
1448 else
1449 {
1450 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1451#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1452 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1453#endif
1454 if (!paLSPages[iPage].fDirty)
1455 {
1456 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1457 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1458 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1459 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1460 }
1461 }
1462 break;
1463
1464 case PGM_PAGE_STATE_ZERO:
1465 case PGM_PAGE_STATE_BALLOONED:
1466 if (!paLSPages[iPage].fZero)
1467 {
1468 if (!paLSPages[iPage].fDirty)
1469 {
1470 paLSPages[iPage].fDirty = 1;
1471 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1472 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1473 }
1474 paLSPages[iPage].fZero = 1;
1475 paLSPages[iPage].fShared = 0;
1476#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1477 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1478#endif
1479 }
1480 break;
1481
1482 case PGM_PAGE_STATE_SHARED:
1483 if (!paLSPages[iPage].fShared)
1484 {
1485 if (!paLSPages[iPage].fDirty)
1486 {
1487 paLSPages[iPage].fDirty = 1;
1488 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1489 if (paLSPages[iPage].fZero)
1490 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1491 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1492 }
1493 paLSPages[iPage].fZero = 0;
1494 paLSPages[iPage].fShared = 1;
1495#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1496 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1497#endif
1498 }
1499 break;
1500 }
1501 }
1502 else
1503 {
1504 /*
1505 * All other types => Ignore the page.
1506 */
1507 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1508 paLSPages[iPage].fIgnore = 1;
1509 if (paLSPages[iPage].fWriteMonitored)
1510 {
1511 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1512 * pages! */
1513 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1514 {
1515 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1516 PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1517 Assert(pVM->pgm.s.cMonitoredPages > 0);
1518 pVM->pgm.s.cMonitoredPages--;
1519 }
1520 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1521 {
1522 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1523 Assert(pVM->pgm.s.cWrittenToPages > 0);
1524 pVM->pgm.s.cWrittenToPages--;
1525 }
1526 pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
1527 }
1528
1529 /** @todo the counting doesn't quite work out here. fix later? */
1530 if (paLSPages[iPage].fDirty)
1531 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1532 else
1533 {
1534 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1535 if (paLSPages[iPage].fZero)
1536 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1537 }
1538 pVM->pgm.s.LiveSave.cIgnoredPages++;
1539 }
1540 } /* for each page in range */
1541
1542 if (GCPhysCur != 0)
1543 break; /* Yield + ramrange change */
1544 GCPhysCur = pCur->GCPhysLast;
1545 }
1546 } /* for each range */
1547 } while (pCur);
1548 PGM_UNLOCK(pVM);
1549}
1550
1551
1552/**
1553 * Save quiescent RAM pages.
1554 *
1555 * @returns VBox status code.
1556 * @param pVM The cross context VM structure.
1557 * @param pSSM The SSM handle.
1558 * @param fLiveSave Whether it's a live save or not.
1559 * @param uPass The pass number.
1560 */
1561static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1562{
1563 NOREF(fLiveSave);
1564
1565 /*
1566 * The RAM.
1567 */
1568 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1569 RTGCPHYS GCPhysCur = 0;
1570 PPGMRAMRANGE pCur;
1571
1572 PGM_LOCK_VOID(pVM);
1573 do
1574 {
1575 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1576 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1577 {
1578 if ( pCur->GCPhysLast > GCPhysCur
1579 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1580 {
1581 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1582 uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1583 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
1584 GCPhysCur = 0;
1585 for (; iPage < cPages; iPage++)
1586 {
1587 /* Do yield first. */
1588 if ( uPass != SSM_PASS_FINAL
1589 && (iPage & 0x7ff) == 0x100
1590 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1591 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1592 {
1593 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1594 break; /* restart */
1595 }
1596
1597 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1598
1599 /*
1600 * Only save pages that haven't changed since last scan and are dirty.
1601 */
1602 if ( uPass != SSM_PASS_FINAL
1603 && paLSPages)
1604 {
1605 if (!paLSPages[iPage].fDirty)
1606 continue;
1607 if (paLSPages[iPage].fWriteMonitoredJustNow)
1608 continue;
1609 if (paLSPages[iPage].fIgnore)
1610 continue;
1611 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1612 continue;
1613 if ( PGM_PAGE_GET_STATE(pCurPage)
1614 != ( paLSPages[iPage].fZero
1615 ? PGM_PAGE_STATE_ZERO
1616 : paLSPages[iPage].fShared
1617 ? PGM_PAGE_STATE_SHARED
1618 : PGM_PAGE_STATE_WRITE_MONITORED))
1619 continue;
1620 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1621 continue;
1622 }
1623 else
1624 {
1625 if ( paLSPages
1626 && !paLSPages[iPage].fDirty
1627 && !paLSPages[iPage].fIgnore)
1628 {
1629#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1630 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1631 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1632#endif
1633 continue;
1634 }
1635 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1636 continue;
1637 }
1638
1639 /*
1640 * Do the saving outside the PGM critsect since SSM may block on I/O.
1641 */
1642 int rc;
1643 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1644 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1645 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1646 bool fSkipped = false;
1647
1648 if (!fZero && !fBallooned)
1649 {
1650 /*
1651 * Copy the page and then save it outside the lock (since any
1652 * SSM call may block).
1653 */
1654 uint8_t abPage[GUEST_PAGE_SIZE];
1655 PGMPAGEMAPLOCK PgMpLck;
1656 void const *pvPage;
1657 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1658 if (RT_SUCCESS(rc))
1659 {
1660 memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
1661#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1662 if (paLSPages)
1663 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1664#endif
1665 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1666 }
1667 PGM_UNLOCK(pVM);
1668 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1669
1670 /* Try save some memory when restoring. */
1671 if (!ASMMemIsZero(pvPage, GUEST_PAGE_SIZE))
1672 {
1673 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1674 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1675 else
1676 {
1677 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1678 SSMR3PutGCPhys(pSSM, GCPhys);
1679 }
1680 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
1681 }
1682 else
1683 {
1684 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1685 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1686 else
1687 {
1688 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1689 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1690 }
1691 }
1692 }
1693 else
1694 {
1695 /*
1696 * Dirty zero or ballooned page.
1697 */
1698#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1699 if (paLSPages)
1700 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1701#endif
1702 PGM_UNLOCK(pVM);
1703
1704 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1705 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1706 rc = SSMR3PutU8(pSSM, u8RecType);
1707 else
1708 {
1709 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1710 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1711 }
1712 }
1713 if (RT_FAILURE(rc))
1714 return rc;
1715
1716 PGM_LOCK_VOID(pVM);
1717 if (!fSkipped)
1718 GCPhysLast = GCPhys;
1719 if (paLSPages)
1720 {
1721 paLSPages[iPage].fDirty = 0;
1722 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1723 if (fZero)
1724 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1725 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1726 pVM->pgm.s.LiveSave.cSavedPages++;
1727 }
1728 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1729 {
1730 GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK;
1731 break; /* restart */
1732 }
1733
1734 } /* for each page in range */
1735
1736 if (GCPhysCur != 0)
1737 break; /* Yield + ramrange change */
1738 GCPhysCur = pCur->GCPhysLast;
1739 }
1740 } /* for each range */
1741 } while (pCur);
1742
1743 PGM_UNLOCK(pVM);
1744
1745 return VINF_SUCCESS;
1746}
1747
1748
1749/**
1750 * Cleans up RAM pages after a live save.
1751 *
1752 * @param pVM The cross context VM structure.
1753 */
1754static void pgmR3DoneRamPages(PVM pVM)
1755{
1756 /*
1757 * Free the tracking arrays and disable write monitoring.
1758 *
1759 * Play nice with the PGM lock in case we're called while the VM is still
1760 * running. This means we have to delay the freeing since we wish to use
1761 * paLSPages as an indicator of which RAM ranges which we need to scan for
1762 * write monitored pages.
1763 */
1764 void *pvToFree = NULL;
1765 PPGMRAMRANGE pCur;
1766 uint32_t cMonitoredPages = 0;
1767 PGM_LOCK_VOID(pVM);
1768 do
1769 {
1770 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1771 {
1772 if (pCur->paLSPages)
1773 {
1774 if (pvToFree)
1775 {
1776 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1777 PGM_UNLOCK(pVM);
1778 MMR3HeapFree(pvToFree);
1779 pvToFree = NULL;
1780 PGM_LOCK_VOID(pVM);
1781 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1782 break; /* start over again. */
1783 }
1784
1785 pvToFree = pCur->paLSPages;
1786 pCur->paLSPages = NULL;
1787
1788 uint32_t iPage = pCur->cb >> GUEST_PAGE_SHIFT;
1789 while (iPage--)
1790 {
1791 PPGMPAGE pPage = &pCur->aPages[iPage];
1792 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1793 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1794 {
1795 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1796 cMonitoredPages++;
1797 }
1798 }
1799 }
1800 }
1801 } while (pCur);
1802
1803 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1804 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1805 pVM->pgm.s.cMonitoredPages = 0;
1806 else
1807 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1808
1809 PGM_UNLOCK(pVM);
1810
1811 MMR3HeapFree(pvToFree);
1812 pvToFree = NULL;
1813}
1814
1815
1816/**
1817 * @callback_method_impl{FNSSMINTLIVEEXEC}
1818 */
1819static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1820{
1821 int rc;
1822
1823 /*
1824 * Save the MMIO2 and ROM range IDs in pass 0.
1825 */
1826 if (uPass == 0)
1827 {
1828 rc = pgmR3SaveRamConfig(pVM, pSSM);
1829 if (RT_FAILURE(rc))
1830 return rc;
1831 rc = pgmR3SaveRomRanges(pVM, pSSM);
1832 if (RT_FAILURE(rc))
1833 return rc;
1834 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1835 if (RT_FAILURE(rc))
1836 return rc;
1837 }
1838 /*
1839 * Reset the page-per-second estimate to avoid inflation by the initial
1840 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1841 */
1842 else if (uPass == 7)
1843 {
1844 pVM->pgm.s.LiveSave.cSavedPages = 0;
1845 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1846 }
1847
1848 /*
1849 * Do the scanning.
1850 */
1851 pgmR3ScanRomPages(pVM);
1852 pgmR3ScanMmio2Pages(pVM, uPass);
1853 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1854 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1855
1856 /*
1857 * Save the pages.
1858 */
1859 if (uPass == 0)
1860 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1861 else
1862 rc = VINF_SUCCESS;
1863 if (RT_SUCCESS(rc))
1864 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1865 if (RT_SUCCESS(rc))
1866 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1867 if (RT_SUCCESS(rc))
1868 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1869 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1870
1871 return rc;
1872}
1873
1874
1875/**
1876 * @callback_method_impl{FNSSMINTLIVEVOTE}
1877 */
1878static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1879{
1880 /*
1881 * Update and calculate parameters used in the decision making.
1882 */
1883 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1884
1885 /* update history. */
1886 PGM_LOCK_VOID(pVM);
1887 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1888 PGM_UNLOCK(pVM);
1889 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1890 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1891 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1892 + cWrittenToPages;
1893 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1894 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1895 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1896
1897 /* calc shortterm average (4 passes). */
1898 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1899 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1900 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1901 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1902 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1903 uint32_t const cDirtyPagesShort = cTotal / 4;
1904 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1905
1906 /* calc longterm average. */
1907 cTotal = 0;
1908 if (uPass < cHistoryEntries)
1909 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1910 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1911 else
1912 for (i = 0; i < cHistoryEntries; i++)
1913 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1914 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
1915 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
1916
1917 /* estimate the speed */
1918 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
1919 uint32_t cPagesPerSecond = (uint32_t)( (long double)pVM->pgm.s.LiveSave.cSavedPages
1920 / ((long double)cNsElapsed / 1000000000.0) );
1921 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
1922
1923 /*
1924 * Try make a decision.
1925 */
1926 if ( cDirtyPagesShort <= cDirtyPagesLong
1927 && ( cDirtyNow <= cDirtyPagesShort
1928 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
1929 )
1930 )
1931 {
1932 if (uPass > 10)
1933 {
1934 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
1935 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
1936 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
1937 if (cMsMaxDowntime < 32)
1938 cMsMaxDowntime = 32;
1939 if ( ( cMsLeftLong <= cMsMaxDowntime
1940 && cMsLeftShort < cMsMaxDowntime)
1941 || cMsLeftShort < cMsMaxDowntime / 2
1942 )
1943 {
1944 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
1945 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
1946 return VINF_SUCCESS;
1947 }
1948 }
1949 else
1950 {
1951 if ( ( cDirtyPagesShort <= 128
1952 && cDirtyPagesLong <= 1024)
1953 || cDirtyPagesLong <= 256
1954 )
1955 {
1956 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
1957 return VINF_SUCCESS;
1958 }
1959 }
1960 }
1961
1962 /*
1963 * Come up with a completion percentage. Currently this is a simple
1964 * dirty page (long term) vs. total pages ratio + some pass trickery.
1965 */
1966 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong
1967 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
1968 if (uPctDirty <= 100)
1969 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
1970 else
1971 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
1972 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
1973
1974 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1975}
1976
1977
1978/**
1979 * @callback_method_impl{FNSSMINTLIVEPREP}
1980 *
1981 * This will attempt to allocate and initialize the tracking structures. It
1982 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1983 * pgmR3SaveDone will do the cleanups.
1984 */
1985static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
1986{
1987 /*
1988 * Indicate that we will be using the write monitoring.
1989 */
1990 PGM_LOCK_VOID(pVM);
1991 /** @todo find a way of mediating this when more users are added. */
1992 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
1993 {
1994 PGM_UNLOCK(pVM);
1995 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
1996 }
1997 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
1998 PGM_UNLOCK(pVM);
1999
2000 /*
2001 * Initialize the statistics.
2002 */
2003 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2004 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2005 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2006 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2007 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2008 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2009 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2010 pVM->pgm.s.LiveSave.fActive = true;
2011 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2012 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2013 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2014 pVM->pgm.s.LiveSave.cSavedPages = 0;
2015 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2016 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2017
2018 /*
2019 * Per page type.
2020 */
2021 int rc = pgmR3PrepRomPages(pVM);
2022 if (RT_SUCCESS(rc))
2023 rc = pgmR3PrepMmio2Pages(pVM);
2024 if (RT_SUCCESS(rc))
2025 rc = pgmR3PrepRamPages(pVM);
2026
2027 NOREF(pSSM);
2028 return rc;
2029}
2030
2031
2032/**
2033 * @callback_method_impl{FNSSMINTSAVEEXEC}
2034 */
2035static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2036{
2037 PPGM pPGM = &pVM->pgm.s;
2038
2039 /*
2040 * Lock PGM and set the no-more-writes indicator.
2041 */
2042 PGM_LOCK_VOID(pVM);
2043 pVM->pgm.s.fNoMorePhysWrites = true;
2044
2045 /*
2046 * Save basic data (required / unaffected by relocation).
2047 */
2048 int rc = SSMR3PutStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2049
2050 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
2051 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2052
2053 /*
2054 * Save the (remainder of the) memory.
2055 */
2056 if (RT_SUCCESS(rc))
2057 {
2058 if (pVM->pgm.s.LiveSave.fActive)
2059 {
2060 pgmR3ScanRomPages(pVM);
2061 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2062 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2063
2064 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2065 if (RT_SUCCESS(rc))
2066 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2067 if (RT_SUCCESS(rc))
2068 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2069 }
2070 else
2071 {
2072 rc = pgmR3SaveRamConfig(pVM, pSSM);
2073 if (RT_SUCCESS(rc))
2074 rc = pgmR3SaveRomRanges(pVM, pSSM);
2075 if (RT_SUCCESS(rc))
2076 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2077 if (RT_SUCCESS(rc))
2078 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2079 if (RT_SUCCESS(rc))
2080 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2081 if (RT_SUCCESS(rc))
2082 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2083 if (RT_SUCCESS(rc))
2084 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2085 }
2086 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes of it.) */
2087 }
2088
2089 PGM_UNLOCK(pVM);
2090 return rc;
2091}
2092
2093
2094/**
2095 * @callback_method_impl{FNSSMINTSAVEDONE}
2096 */
2097static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2098{
2099 /*
2100 * Do per page type cleanups first.
2101 */
2102 if (pVM->pgm.s.LiveSave.fActive)
2103 {
2104 pgmR3DoneRomPages(pVM);
2105 pgmR3DoneMmio2Pages(pVM);
2106 pgmR3DoneRamPages(pVM);
2107 }
2108
2109 /*
2110 * Clear the live save indicator and disengage write monitoring.
2111 */
2112 PGM_LOCK_VOID(pVM);
2113 pVM->pgm.s.LiveSave.fActive = false;
2114 /** @todo this is blindly assuming that we're the only user of write
2115 * monitoring. Fix this when more users are added. */
2116 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2117 PGM_UNLOCK(pVM);
2118
2119 NOREF(pSSM);
2120 return VINF_SUCCESS;
2121}
2122
2123
2124/**
2125 * @callback_method_impl{FNSSMINTLOADPREP}
2126 */
2127static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2128{
2129 /*
2130 * Call the reset function to make sure all the memory is cleared.
2131 */
2132 PGMR3Reset(pVM);
2133 pVM->pgm.s.LiveSave.fActive = false;
2134 NOREF(pSSM);
2135 return VINF_SUCCESS;
2136}
2137
2138
2139/**
2140 * Load an ignored page.
2141 *
2142 * @returns VBox status code.
2143 * @param pSSM The saved state handle.
2144 */
2145static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2146{
2147 uint8_t abPage[GUEST_PAGE_SIZE];
2148 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2149}
2150
2151
2152/**
2153 * Compares a page with an old save type value.
2154 *
2155 * @returns true if equal, false if not.
2156 * @param pPage The page to compare.
2157 * @param uOldType The old type value from the saved state.
2158 */
2159DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2160{
2161 uint8_t uOldPageType;
2162 switch (PGM_PAGE_GET_TYPE(pPage))
2163 {
2164 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2165 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2166 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2167 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2168 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2169 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2170 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2171 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2172 default:
2173 AssertFailed();
2174 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2175 break;
2176 }
2177 return uOldPageType == uOldType;
2178}
2179
2180
2181/**
2182 * Loads a page without any bits in the saved state, i.e. making sure it's
2183 * really zero.
2184 *
2185 * @returns VBox status code.
2186 * @param pVM The cross context VM structure.
2187 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2188 * state).
2189 * @param pPage The guest page tracking structure.
2190 * @param GCPhys The page address.
2191 * @param pRam The ram range (logging).
2192 */
2193static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2194{
2195 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2196 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2197 return VERR_SSM_UNEXPECTED_DATA;
2198
2199 /* I think this should be sufficient. */
2200 if ( !PGM_PAGE_IS_ZERO(pPage)
2201 && !PGM_PAGE_IS_BALLOONED(pPage))
2202 return VERR_SSM_UNEXPECTED_DATA;
2203
2204 NOREF(pVM);
2205 NOREF(GCPhys);
2206 NOREF(pRam);
2207 return VINF_SUCCESS;
2208}
2209
2210
2211/**
2212 * Loads a page from the saved state.
2213 *
2214 * @returns VBox status code.
2215 * @param pVM The cross context VM structure.
2216 * @param pSSM The SSM handle.
2217 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2218 * state).
2219 * @param pPage The guest page tracking structure.
2220 * @param GCPhys The page address.
2221 * @param pRam The ram range (logging).
2222 */
2223static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2224{
2225 /*
2226 * Match up the type, dealing with MMIO2 aliases (dropped).
2227 */
2228 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2229 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2230 /* kudge for the expanded PXE bios (r67885) - @bugref{5687}: */
2231 || ( uOldType == PGMPAGETYPE_OLD_RAM
2232 && GCPhys >= 0xed000
2233 && GCPhys <= 0xeffff
2234 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2235 ,
2236 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2237 VERR_SSM_UNEXPECTED_DATA);
2238
2239 /*
2240 * Load the page.
2241 */
2242 PGMPAGEMAPLOCK PgMpLck;
2243 void *pvPage;
2244 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2245 if (RT_SUCCESS(rc))
2246 {
2247 rc = SSMR3GetMem(pSSM, pvPage, GUEST_PAGE_SIZE);
2248 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2249 }
2250
2251 return rc;
2252}
2253
2254
2255/**
2256 * Loads a page (counter part to pgmR3SavePage).
2257 *
2258 * @returns VBox status code, fully bitched errors.
2259 * @param pVM The cross context VM structure.
2260 * @param pSSM The SSM handle.
2261 * @param uOldType The page type.
2262 * @param pPage The page.
2263 * @param GCPhys The page address.
2264 * @param pRam The RAM range (for error messages).
2265 */
2266static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2267{
2268 uint8_t uState;
2269 int rc = SSMR3GetU8(pSSM, &uState);
2270 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2271 if (uState == 0 /* zero */)
2272 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2273 else if (uState == 1)
2274 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2275 else
2276 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2277 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2278 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2279 rc);
2280 return VINF_SUCCESS;
2281}
2282
2283
2284/**
2285 * Loads a shadowed ROM page.
2286 *
2287 * @returns VBox status code, errors are fully bitched.
2288 * @param pVM The cross context VM structure.
2289 * @param pSSM The saved state handle.
2290 * @param pPage The page.
2291 * @param GCPhys The page address.
2292 * @param pRam The RAM range (for error messages).
2293 */
2294static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2295{
2296 /*
2297 * Load and set the protection first, then load the two pages, the first
2298 * one is the active the other is the passive.
2299 */
2300 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2301 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2302
2303 uint8_t uProt;
2304 int rc = SSMR3GetU8(pSSM, &uProt);
2305 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2306 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2307 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2308 && enmProt < PGMROMPROT_END,
2309 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2310 VERR_SSM_UNEXPECTED_DATA);
2311
2312 if (pRomPage->enmProt != enmProt)
2313 {
2314 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2315 AssertLogRelRCReturn(rc, rc);
2316 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2317 }
2318
2319 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2320 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2321 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2322 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2323
2324 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2325 * used down the line (will the 2nd page will be written to the first
2326 * one because of a false TLB hit since the TLB is using GCPhys and
2327 * doesn't check the HCPhys of the desired page). */
2328 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2329 if (RT_SUCCESS(rc))
2330 {
2331 *pPageActive = *pPage;
2332 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2333 }
2334 return rc;
2335}
2336
2337/**
2338 * Ram range flags and bits for older versions of the saved state.
2339 *
2340 * @returns VBox status code.
2341 *
2342 * @param pVM The cross context VM structure.
2343 * @param pSSM The SSM handle.
2344 * @param uVersion The saved state version.
2345 */
2346static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2347{
2348 PPGM pPGM = &pVM->pgm.s;
2349
2350 /*
2351 * Ram range flags and bits.
2352 */
2353 uint32_t i = 0;
2354 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
2355 {
2356 /* Check the sequence number / separator. */
2357 uint32_t u32Sep;
2358 int rc = SSMR3GetU32(pSSM, &u32Sep);
2359 if (RT_FAILURE(rc))
2360 return rc;
2361 if (u32Sep == ~0U)
2362 break;
2363 if (u32Sep != i)
2364 {
2365 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2366 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2367 }
2368 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2369
2370 /* Get the range details. */
2371 RTGCPHYS GCPhys;
2372 SSMR3GetGCPhys(pSSM, &GCPhys);
2373 RTGCPHYS GCPhysLast;
2374 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2375 RTGCPHYS cb;
2376 SSMR3GetGCPhys(pSSM, &cb);
2377 uint8_t fHaveBits;
2378 rc = SSMR3GetU8(pSSM, &fHaveBits);
2379 if (RT_FAILURE(rc))
2380 return rc;
2381 if (fHaveBits & ~1)
2382 {
2383 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2384 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2385 }
2386 size_t cchDesc = 0;
2387 char szDesc[256];
2388 szDesc[0] = '\0';
2389 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2390 {
2391 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2392 if (RT_FAILURE(rc))
2393 return rc;
2394 /* Since we've modified the description strings in r45878, only compare
2395 them if the saved state is more recent. */
2396 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2397 cchDesc = strlen(szDesc);
2398 }
2399
2400 /*
2401 * Match it up with the current range.
2402 *
2403 * Note there is a hack for dealing with the high BIOS mapping
2404 * in the old saved state format, this means we might not have
2405 * a 1:1 match on success.
2406 */
2407 if ( ( GCPhys != pRam->GCPhys
2408 || GCPhysLast != pRam->GCPhysLast
2409 || cb != pRam->cb
2410 || ( cchDesc
2411 && strcmp(szDesc, pRam->pszDesc)) )
2412 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2413 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2414 || GCPhys != UINT32_C(0xfff80000)
2415 || GCPhysLast != UINT32_C(0xffffffff)
2416 || pRam->GCPhysLast != GCPhysLast
2417 || pRam->GCPhys < GCPhys
2418 || !fHaveBits)
2419 )
2420 {
2421 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2422 "State : %RGp-%RGp %RGp bytes %s %s\n",
2423 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2424 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2425 /*
2426 * If we're loading a state for debugging purpose, don't make a fuss if
2427 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2428 */
2429 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2430 || GCPhys < 8 * _1M)
2431 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2432 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2433 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2434 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
2435
2436 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2437 continue;
2438 }
2439
2440 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> GUEST_PAGE_SHIFT;
2441 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2442 {
2443 /*
2444 * Load the pages one by one.
2445 */
2446 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2447 {
2448 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2449 PPGMPAGE pPage = &pRam->aPages[iPage];
2450 uint8_t uOldType;
2451 rc = SSMR3GetU8(pSSM, &uOldType);
2452 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2453 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2454 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2455 else
2456 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2457 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2458 }
2459 }
2460 else
2461 {
2462 /*
2463 * Old format.
2464 */
2465
2466 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2467 The rest is generally irrelevant and wrong since the stuff have to match registrations. */
2468 uint32_t fFlags = 0;
2469 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2470 {
2471 uint16_t u16Flags;
2472 rc = SSMR3GetU16(pSSM, &u16Flags);
2473 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2474 fFlags |= u16Flags;
2475 }
2476
2477 /* Load the bits */
2478 if ( !fHaveBits
2479 && GCPhysLast < UINT32_C(0xe0000000))
2480 {
2481 /*
2482 * Dynamic chunks.
2483 */
2484 const uint32_t cPagesInChunk = (1*1024*1024) >> GUEST_PAGE_SHIFT;
2485 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2486 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2487 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2488
2489 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2490 {
2491 uint8_t fPresent;
2492 rc = SSMR3GetU8(pSSM, &fPresent);
2493 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2494 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2495 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2496 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2497
2498 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2499 {
2500 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2501 PPGMPAGE pPage = &pRam->aPages[iPage];
2502 if (fPresent)
2503 {
2504 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2505 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2506 rc = pgmR3LoadPageToDevNullOld(pSSM);
2507 else
2508 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2509 }
2510 else
2511 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2512 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2513 }
2514 }
2515 }
2516 else if (pRam->pvR3)
2517 {
2518 /*
2519 * MMIO2.
2520 */
2521 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2522 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2523 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2524 AssertLogRelMsgReturn(pRam->pvR3,
2525 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2526 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2527
2528 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2529 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2530 }
2531 else if (GCPhysLast < UINT32_C(0xfff80000))
2532 {
2533 /*
2534 * PCI MMIO, no pages saved.
2535 */
2536 }
2537 else
2538 {
2539 /*
2540 * Load the 0xfff80000..0xffffffff BIOS range.
2541 * It starts with X reserved pages that we have to skip over since
2542 * the RAMRANGE create by the new code won't include those.
2543 */
2544 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2545 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2546 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2547 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2548 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2549 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2550 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2551
2552 /* Skip wasted reserved pages before the ROM. */
2553 while (GCPhys < pRam->GCPhys)
2554 {
2555 rc = pgmR3LoadPageToDevNullOld(pSSM);
2556 GCPhys += GUEST_PAGE_SIZE;
2557 }
2558
2559 /* Load the bios pages. */
2560 cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2561 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2562 {
2563 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2564 PPGMPAGE pPage = &pRam->aPages[iPage];
2565
2566 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2567 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2568 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2569 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2570 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2571 }
2572 }
2573 }
2574 }
2575
2576 return VINF_SUCCESS;
2577}
2578
2579
2580/**
2581 * Worker for pgmR3Load and pgmR3LoadLocked.
2582 *
2583 * @returns VBox status code.
2584 *
2585 * @param pVM The cross context VM structure.
2586 * @param pSSM The SSM handle.
2587 * @param uVersion The PGM saved state unit version.
2588 * @param uPass The pass number.
2589 *
2590 * @todo This needs splitting up if more record types or code twists are
2591 * added...
2592 */
2593static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2594{
2595 NOREF(uPass);
2596
2597 /*
2598 * Process page records until we hit the terminator.
2599 */
2600 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2601 PPGMRAMRANGE pRamHint = NULL;
2602 uint8_t id = UINT8_MAX;
2603 uint32_t iPage = UINT32_MAX - 10;
2604 PPGMROMRANGE pRom = NULL;
2605 PPGMREGMMIO2RANGE pRegMmio = NULL;
2606
2607 /*
2608 * We batch up pages that should be freed instead of calling GMM for
2609 * each and every one of them. Note that we'll lose the pages in most
2610 * failure paths - this should probably be addressed one day.
2611 */
2612 uint32_t cPendingPages = 0;
2613 PGMMFREEPAGESREQ pReq;
2614 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2615 AssertLogRelRCReturn(rc, rc);
2616
2617 for (;;)
2618 {
2619 /*
2620 * Get the record type and flags.
2621 */
2622 uint8_t u8;
2623 rc = SSMR3GetU8(pSSM, &u8);
2624 if (RT_FAILURE(rc))
2625 return rc;
2626 if (u8 == PGM_STATE_REC_END)
2627 {
2628 /*
2629 * Finish off any pages pending freeing.
2630 */
2631 if (cPendingPages)
2632 {
2633 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2634 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2635 AssertLogRelRCReturn(rc, rc);
2636 }
2637 GMMR3FreePagesCleanup(pReq);
2638 return VINF_SUCCESS;
2639 }
2640 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2641 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2642 {
2643 /*
2644 * RAM page.
2645 */
2646 case PGM_STATE_REC_RAM_ZERO:
2647 case PGM_STATE_REC_RAM_RAW:
2648 case PGM_STATE_REC_RAM_BALLOONED:
2649 {
2650 /*
2651 * Get the address and resolve it into a page descriptor.
2652 */
2653 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2654 GCPhys += GUEST_PAGE_SIZE;
2655 else
2656 {
2657 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2658 if (RT_FAILURE(rc))
2659 return rc;
2660 }
2661 AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2662
2663 PPGMPAGE pPage;
2664 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2665 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2666
2667 /*
2668 * Take action according to the record type.
2669 */
2670 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2671 {
2672 case PGM_STATE_REC_RAM_ZERO:
2673 {
2674 if (PGM_PAGE_IS_ZERO(pPage))
2675 break;
2676
2677 /* Ballooned pages must be unmarked (live snapshot and
2678 teleportation scenarios). */
2679 if (PGM_PAGE_IS_BALLOONED(pPage))
2680 {
2681 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2682 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2683 break;
2684 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2685 break;
2686 }
2687
2688 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2689
2690 /* If this is a ROM page, we must clear it and not try to
2691 * free it. Ditto if the VM is using RamPreAlloc (see
2692 * @bugref{6318}). */
2693 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2694 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2695#ifdef VBOX_WITH_PGM_NEM_MODE
2696 || pVM->pgm.s.fNemMode
2697#endif
2698 || pVM->pgm.s.fRamPreAlloc)
2699 {
2700 PGMPAGEMAPLOCK PgMpLck;
2701 void *pvDstPage;
2702 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2703 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2704
2705 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2706 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2707 }
2708 /* Free it only if it's not part of a previously
2709 allocated large page (no need to clear the page). */
2710 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2711 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2712 {
2713 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2714 AssertRCReturn(rc, rc);
2715 }
2716 /** @todo handle large pages (see @bugref{5545}) */
2717 break;
2718 }
2719
2720 case PGM_STATE_REC_RAM_BALLOONED:
2721 {
2722 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2723 if (PGM_PAGE_IS_BALLOONED(pPage))
2724 break;
2725
2726 /* We don't map ballooned pages in our shadow page tables, let's
2727 just free it if allocated and mark as ballooned. See @bugref{5515}. */
2728 if (PGM_PAGE_IS_ALLOCATED(pPage))
2729 {
2730 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2731 * @bugref{5545}). */
2732 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2733 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2734 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2735
2736 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2737 AssertRCReturn(rc, rc);
2738 }
2739 Assert(PGM_PAGE_IS_ZERO(pPage));
2740 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2741 break;
2742 }
2743
2744 case PGM_STATE_REC_RAM_RAW:
2745 {
2746 PGMPAGEMAPLOCK PgMpLck;
2747 void *pvDstPage;
2748 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2749 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2750 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2751 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2752 if (RT_FAILURE(rc))
2753 return rc;
2754 break;
2755 }
2756
2757 default:
2758 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2759 }
2760 id = UINT8_MAX;
2761 break;
2762 }
2763
2764 /*
2765 * MMIO2 page.
2766 */
2767 case PGM_STATE_REC_MMIO2_RAW:
2768 case PGM_STATE_REC_MMIO2_ZERO:
2769 {
2770 /*
2771 * Get the ID + page number and resolved that into a MMIO2 page.
2772 */
2773 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2774 iPage++;
2775 else
2776 {
2777 SSMR3GetU8(pSSM, &id);
2778 rc = SSMR3GetU32(pSSM, &iPage);
2779 if (RT_FAILURE(rc))
2780 return rc;
2781 }
2782 if ( !pRegMmio
2783 || pRegMmio->idSavedState != id)
2784 {
2785 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
2786 if (pRegMmio->idSavedState == id)
2787 break;
2788 AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2789 }
2790 AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT),
2791 ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc),
2792 VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2793 void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << GUEST_PAGE_SHIFT);
2794
2795 /*
2796 * Load the page bits.
2797 */
2798 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2799 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2800 else
2801 {
2802 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2803 if (RT_FAILURE(rc))
2804 return rc;
2805 }
2806 GCPhys = NIL_RTGCPHYS;
2807 break;
2808 }
2809
2810 /*
2811 * ROM pages.
2812 */
2813 case PGM_STATE_REC_ROM_VIRGIN:
2814 case PGM_STATE_REC_ROM_SHW_RAW:
2815 case PGM_STATE_REC_ROM_SHW_ZERO:
2816 case PGM_STATE_REC_ROM_PROT:
2817 {
2818 /*
2819 * Get the ID + page number and resolved that into a ROM page descriptor.
2820 */
2821 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2822 iPage++;
2823 else
2824 {
2825 SSMR3GetU8(pSSM, &id);
2826 rc = SSMR3GetU32(pSSM, &iPage);
2827 if (RT_FAILURE(rc))
2828 return rc;
2829 }
2830 if ( !pRom
2831 || pRom->idSavedState != id)
2832 {
2833 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2834 if (pRom->idSavedState == id)
2835 break;
2836 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2837 }
2838 AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT),
2839 ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc),
2840 VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2841 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2842 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
2843
2844 /*
2845 * Get and set the protection.
2846 */
2847 uint8_t u8Prot;
2848 rc = SSMR3GetU8(pSSM, &u8Prot);
2849 if (RT_FAILURE(rc))
2850 return rc;
2851 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2852 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2853
2854 if (enmProt != pRomPage->enmProt)
2855 {
2856 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2857 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2858 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2859 GCPhys, enmProt, pRom->pszDesc);
2860 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2861 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2862 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2863 }
2864 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2865 break; /* done */
2866
2867 /*
2868 * Get the right page descriptor.
2869 */
2870 PPGMPAGE pRealPage;
2871 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2872 {
2873 case PGM_STATE_REC_ROM_VIRGIN:
2874 if (!PGMROMPROT_IS_ROM(enmProt))
2875 pRealPage = &pRomPage->Virgin;
2876 else
2877 pRealPage = NULL;
2878 break;
2879
2880 case PGM_STATE_REC_ROM_SHW_RAW:
2881 case PGM_STATE_REC_ROM_SHW_ZERO:
2882 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2883 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2884 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
2885 GCPhys, enmProt, pRom->pszDesc);
2886 if (PGMROMPROT_IS_ROM(enmProt))
2887 pRealPage = &pRomPage->Shadow;
2888 else
2889 pRealPage = NULL;
2890 break;
2891
2892 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
2893 }
2894#ifdef VBOX_WITH_PGM_NEM_MODE
2895 bool const fAltPage = pRealPage != NULL;
2896#endif
2897 if (!pRealPage)
2898 {
2899 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
2900 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2901 }
2902
2903 /*
2904 * Make it writable and map it (if necessary).
2905 */
2906 void *pvDstPage = NULL;
2907 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2908 {
2909 case PGM_STATE_REC_ROM_SHW_ZERO:
2910 if ( PGM_PAGE_IS_ZERO(pRealPage)
2911 || PGM_PAGE_IS_BALLOONED(pRealPage))
2912 break;
2913 /** @todo implement zero page replacing. */
2914 RT_FALL_THRU();
2915 case PGM_STATE_REC_ROM_VIRGIN:
2916 case PGM_STATE_REC_ROM_SHW_RAW:
2917#ifdef VBOX_WITH_PGM_NEM_MODE
2918 if (fAltPage && pVM->pgm.s.fNemMode)
2919 pvDstPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
2920 else
2921#endif
2922 {
2923 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
2924 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2925 }
2926 break;
2927 }
2928
2929 /*
2930 * Load the bits.
2931 */
2932 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2933 {
2934 case PGM_STATE_REC_ROM_SHW_ZERO:
2935 if (pvDstPage)
2936 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2937 break;
2938
2939 case PGM_STATE_REC_ROM_VIRGIN:
2940 case PGM_STATE_REC_ROM_SHW_RAW:
2941 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2942 if (RT_FAILURE(rc))
2943 return rc;
2944 break;
2945 }
2946 GCPhys = NIL_RTGCPHYS;
2947 break;
2948 }
2949
2950 /*
2951 * Unknown type.
2952 */
2953 default:
2954 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2955 }
2956 } /* forever */
2957}
2958
2959
2960/**
2961 * Worker for pgmR3Load.
2962 *
2963 * @returns VBox status code.
2964 *
2965 * @param pVM The cross context VM structure.
2966 * @param pSSM The SSM handle.
2967 * @param uVersion The saved state version.
2968 */
2969static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2970{
2971 PPGM pPGM = &pVM->pgm.s;
2972 int rc;
2973 uint32_t u32Sep;
2974
2975 /*
2976 * Load basic data (required / unaffected by relocation).
2977 */
2978 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2979 {
2980 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
2981 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2982 else
2983 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFieldsPreBalloon[0], NULL /*pvUser*/);
2984
2985 AssertLogRelRCReturn(rc, rc);
2986
2987 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2988 {
2989 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
2990 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
2991 else
2992 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
2993 AssertLogRelRCReturn(rc, rc);
2994 }
2995 }
2996 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2997 {
2998 AssertRelease(pVM->cCpus == 1);
2999
3000 PGMOLD pgmOld;
3001 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
3002 AssertLogRelRCReturn(rc, rc);
3003
3004 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3005 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
3006 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
3007 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
3008 }
3009 else
3010 {
3011 AssertRelease(pVM->cCpus == 1);
3012
3013 SSMR3Skip(pSSM, sizeof(bool));
3014 RTGCPTR GCPtrIgn;
3015 SSMR3GetGCPtr(pSSM, &GCPtrIgn);
3016 SSMR3Skip(pSSM, sizeof(uint32_t));
3017
3018 uint32_t cbRamSizeIgnored;
3019 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3020 if (RT_FAILURE(rc))
3021 return rc;
3022 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3023 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3024
3025 uint32_t u32 = 0;
3026 SSMR3GetUInt(pSSM, &u32);
3027 pVCpu0->pgm.s.fA20Enabled = !!u32;
3028 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3029 RTUINT uGuestMode;
3030 SSMR3GetUInt(pSSM, &uGuestMode);
3031 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3032
3033 /* check separator. */
3034 SSMR3GetU32(pSSM, &u32Sep);
3035 if (RT_FAILURE(rc))
3036 return rc;
3037 if (u32Sep != (uint32_t)~0)
3038 {
3039 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3040 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3041 }
3042 }
3043
3044 /*
3045 * Fix the A20 mask.
3046 */
3047 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3048 {
3049 PVMCPU pVCpu = pVM->apCpusR3[i];
3050 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3051 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3052 }
3053
3054 /*
3055 * The guest mappings - skipped now, see re-fixation in the caller.
3056 */
3057 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3058 {
3059 for (uint32_t i = 0; ; i++)
3060 {
3061 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3062 if (RT_FAILURE(rc))
3063 return rc;
3064 if (u32Sep == ~0U)
3065 break;
3066 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3067
3068 char szDesc[256];
3069 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3070 if (RT_FAILURE(rc))
3071 return rc;
3072 RTGCPTR GCPtrIgnore;
3073 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3074 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3075 if (RT_FAILURE(rc))
3076 return rc;
3077 }
3078 }
3079
3080 /*
3081 * Load the RAM contents.
3082 */
3083 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3084 {
3085 if (!pVM->pgm.s.LiveSave.fActive)
3086 {
3087 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3088 {
3089 rc = pgmR3LoadRamConfig(pVM, pSSM);
3090 if (RT_FAILURE(rc))
3091 return rc;
3092 }
3093 rc = pgmR3LoadRomRanges(pVM, pSSM);
3094 if (RT_FAILURE(rc))
3095 return rc;
3096 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3097 if (RT_FAILURE(rc))
3098 return rc;
3099 }
3100
3101 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3102 }
3103 else
3104 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3105
3106 /* Refresh balloon accounting. */
3107 if (pVM->pgm.s.cBalloonedPages)
3108 {
3109 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3110 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3111 AssertRCReturn(rc, rc);
3112 }
3113 return rc;
3114}
3115
3116
3117/**
3118 * @callback_method_impl{FNSSMINTLOADEXEC}
3119 */
3120static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3121{
3122 int rc;
3123
3124 /*
3125 * Validate version.
3126 */
3127 if ( ( uPass != SSM_PASS_FINAL
3128 && uVersion != PGM_SAVED_STATE_VERSION
3129 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3130 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3131 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3132 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3133 || ( uVersion != PGM_SAVED_STATE_VERSION
3134 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3135 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3136 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3137 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3138 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3139 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3140 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3141 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3142 )
3143 {
3144 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3145 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3146 }
3147
3148 /*
3149 * Do the loading while owning the lock because a bunch of the functions
3150 * we're using requires this.
3151 */
3152 if (uPass != SSM_PASS_FINAL)
3153 {
3154 PGM_LOCK_VOID(pVM);
3155 if (uPass != 0)
3156 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3157 else
3158 {
3159 pVM->pgm.s.LiveSave.fActive = true;
3160 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3161 rc = pgmR3LoadRamConfig(pVM, pSSM);
3162 else
3163 rc = VINF_SUCCESS;
3164 if (RT_SUCCESS(rc))
3165 rc = pgmR3LoadRomRanges(pVM, pSSM);
3166 if (RT_SUCCESS(rc))
3167 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3168 if (RT_SUCCESS(rc))
3169 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3170 }
3171 PGM_UNLOCK(pVM);
3172 }
3173 else
3174 {
3175 PGM_LOCK_VOID(pVM);
3176 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3177 pVM->pgm.s.LiveSave.fActive = false;
3178 PGM_UNLOCK(pVM);
3179 if (RT_SUCCESS(rc))
3180 {
3181 /*
3182 * We require a full resync now.
3183 */
3184 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3185 {
3186 PVMCPU pVCpu = pVM->apCpusR3[i];
3187 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3188 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3189 /** @todo For guest PAE, we might get the wrong
3190 * aGCPhysGstPaePDs values now. We should used the
3191 * saved ones... Postponing this since it nothing new
3192 * and PAE/PDPTR needs some general readjusting, see
3193 * @bugref{5880}. */
3194 }
3195
3196 pgmR3HandlerPhysicalUpdateAll(pVM);
3197
3198 /*
3199 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3200 * (Requires the CPUM state to be restored already!)
3201 */
3202 if (CPUMR3IsStateRestorePending(pVM))
3203 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3204 N_("PGM was unexpectedly restored before CPUM"));
3205
3206 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3207 {
3208 PVMCPU pVCpu = pVM->apCpusR3[i];
3209
3210 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
3211 AssertLogRelRCReturn(rc, rc);
3212
3213 /* Update the PSE, NX flags and validity masks. */
3214 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3215 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3216 }
3217 }
3218 }
3219
3220 return rc;
3221}
3222
3223
3224/**
3225 * @callback_method_impl{FNSSMINTLOADDONE}
3226 */
3227static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3228{
3229 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3230 NOREF(pSSM);
3231 return VINF_SUCCESS;
3232}
3233
3234
3235/**
3236 * Registers the saved state callbacks with SSM.
3237 *
3238 * @returns VBox status code.
3239 * @param pVM The cross context VM structure.
3240 * @param cbRam The RAM size.
3241 */
3242int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3243{
3244 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3245 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3246 NULL, pgmR3SaveExec, pgmR3SaveDone,
3247 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3248}
3249
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette