VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@93661

Last change on this file since 93661 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

1/* $Id: PGMSavedState.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/stam.h>
26#include <VBox/vmm/ssm.h>
27#include <VBox/vmm/pdmdrv.h>
28#include <VBox/vmm/pdmdev.h>
29#include "PGMInternal.h"
30#include <VBox/vmm/vm.h>
31#include "PGMInline.h"
32
33#include <VBox/param.h>
34#include <VBox/err.h>
35
36#include <iprt/asm.h>
37#include <iprt/assert.h>
38#include <iprt/crc.h>
39#include <iprt/mem.h>
40#include <iprt/sha.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43
44
45/*********************************************************************************************************************************
46* Defined Constants And Macros *
47*********************************************************************************************************************************/
48/** Saved state data unit version. */
49#define PGM_SAVED_STATE_VERSION 14
50/** Saved state data unit version before the PAE PDPE registers. */
51#define PGM_SAVED_STATE_VERSION_PRE_PAE 13
52/** Saved state data unit version after this includes ballooned page flags in
53 * the state (see @bugref{5515}). */
54#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
55/** Saved state before the balloon change. */
56#define PGM_SAVED_STATE_VERSION_PRE_BALLOON 11
57/** Saved state data unit version used during 3.1 development, misses the RAM
58 * config. */
59#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG 10
60/** Saved state data unit version for 3.0 (pre teleportation). */
61#define PGM_SAVED_STATE_VERSION_3_0_0 9
62/** Saved state data unit version for 2.2.2 and later. */
63#define PGM_SAVED_STATE_VERSION_2_2_2 8
64/** Saved state data unit version for 2.2.0. */
65#define PGM_SAVED_STATE_VERSION_RR_DESC 7
66/** Saved state data unit version. */
67#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
68
69
70/** @name Sparse state record types
71 * @{ */
72/** Zero page. No data. */
73#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
74/** Raw page. */
75#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
76/** Raw MMIO2 page. */
77#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
78/** Zero MMIO2 page. */
79#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
80/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
81#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
82/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
83#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
84/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
85#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
86/** ROM protection (8-bit). */
87#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
88/** Ballooned page. No data. */
89#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
90/** The last record type. */
91#define PGM_STATE_REC_LAST PGM_STATE_REC_RAM_BALLOONED
92/** End marker. */
93#define PGM_STATE_REC_END UINT8_C(0xff)
94/** Flag indicating that the data is preceded by the page address.
95 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
96 * range ID and a 32-bit page index.
97 */
98#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
99/** @} */
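
/*
 * A minimal sketch of how one sparse RAM record is laid out per the
 * definitions above. Records with PGM_STATE_REC_FLAG_ADDR carry an explicit
 * address; records without it implicitly describe the page right after the
 * previously saved one. The flat buffer and the sketchEncodeRamRawRecord
 * name are hypothetical -- the real code streams the same bytes through
 * SSMR3PutU8/SSMR3PutGCPhys/SSMR3PutMem instead.
 */
#include <stdint.h>
#include <string.h>

static size_t sketchEncodeRamRawRecord(uint8_t *pbDst, uint64_t GCPhys, int fExplicitAddr,
                                       const uint8_t *pbPage, size_t cbPage)
{
    size_t off = 0;
    if (fExplicitAddr)
    {
        pbDst[off++] = PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR;
        memcpy(&pbDst[off], &GCPhys, sizeof(GCPhys)); /* the RTGCPHYS page address */
        off += sizeof(GCPhys);
    }
    else
        pbDst[off++] = PGM_STATE_REC_RAM_RAW;         /* address: previous page + page size */
    memcpy(&pbDst[off], pbPage, cbPage);              /* the raw page bits */
    return off + cbPage;
}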
100
101/** The CRC-32 for a zero page. */
102#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
103/** The CRC-32 for a zero half page. */
104#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
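
/*
 * A sketch verifying the two constants above, assuming the 4KiB x86 guest
 * page size and that RTCrc32 computes the standard reflected CRC-32
 * (polynomial 0xedb88320). sketchCrc32 and sketchCheckZeroPageCrcs are
 * hypothetical names.
 */
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static uint32_t sketchCrc32(const uint8_t *pb, size_t cb)
{
    uint32_t uCrc = UINT32_C(0xffffffff);
    while (cb-- > 0)
    {
        uCrc ^= *pb++;
        for (unsigned i = 0; i < 8; i++)
            uCrc = (uCrc >> 1) ^ (UINT32_C(0xedb88320) & (0U - (uCrc & 1)));
    }
    return ~uCrc;
}

static void sketchCheckZeroPageCrcs(void)
{
    static const uint8_t s_abZeroPage[4096]; /* statics are zero-initialized */
    assert(sketchCrc32(s_abZeroPage, sizeof(s_abZeroPage))     == PGM_STATE_CRC32_ZERO_PAGE);
    assert(sketchCrc32(s_abZeroPage, sizeof(s_abZeroPage) / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE);
}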
105
106
107
108/** @name Old Page types used in older saved states.
109 * @{ */
110/** Old saved state: The usual invalid zero entry. */
111#define PGMPAGETYPE_OLD_INVALID 0
112/** Old saved state: RAM page. (RWX) */
113#define PGMPAGETYPE_OLD_RAM 1
114/** Old saved state: MMIO2 page. (RWX) */
115#define PGMPAGETYPE_OLD_MMIO2 2
116/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
117 * See PGMHandlerPhysicalPageAlias(). */
118#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO 3
119/** Old saved state: Shadowed ROM. (RWX) */
120#define PGMPAGETYPE_OLD_ROM_SHADOW 4
121/** Old saved state: ROM page. (R-X) */
122#define PGMPAGETYPE_OLD_ROM 5
123/** Old saved state: MMIO page. (---) */
124#define PGMPAGETYPE_OLD_MMIO 6
125/** @} */
126
127
128/*********************************************************************************************************************************
129* Structures and Typedefs *
130*********************************************************************************************************************************/
131/** For loading old saved states. (pre-smp) */
132typedef struct
133{
134 /** If set no conflict checks are required. (boolean) */
135 bool fMappingsFixed;
136 /** Size of fixed mapping */
137 uint32_t cbMappingFixed;
138 /** Base address (GC) of fixed mapping */
139 RTGCPTR GCPtrMappingFixed;
140 /** A20 gate mask.
141 * Our current approach to A20 emulation is to let REM do it and don't bother
142 * anywhere else. The interesting guests will be operating with it enabled anyway.
143 * But should the need arise, we'll subject physical addresses to this mask. */
144 RTGCPHYS GCPhysA20Mask;
145 /** A20 gate state - boolean! */
146 bool fA20Enabled;
147 /** The guest paging mode. */
148 PGMMODE enmGuestMode;
149} PGMOLD;
150
151
152/*********************************************************************************************************************************
153* Global Variables *
154*********************************************************************************************************************************/
155/** PGM fields to save/load. */
156
157static const SSMFIELD s_aPGMFields[] =
158{
159 SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)),
160 SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed),
161 SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)),
162 SSMFIELD_ENTRY( PGM, cBalloonedPages),
163 SSMFIELD_ENTRY_TERM()
164};
165
166static const SSMFIELD s_aPGMFieldsPreBalloon[] =
167{
168 SSMFIELD_ENTRY_OLD( fMappingsFixed, sizeof(bool)),
169 SSMFIELD_ENTRY_OLD_GCPTR( GCPtrMappingFixed),
170 SSMFIELD_ENTRY_OLD( cbMappingFixed, sizeof(uint32_t)),
171 SSMFIELD_ENTRY_TERM()
172};
173
174static const SSMFIELD s_aPGMCpuFields[] =
175{
176 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
177 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
178 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
179 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[0]),
180 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[1]),
181 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[2]),
182 SSMFIELD_ENTRY( PGMCPU, aGCPhysGstPaePDs[3]),
183 SSMFIELD_ENTRY_TERM()
184};
185
186static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
187{
188 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
189 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
190 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
191 SSMFIELD_ENTRY_TERM()
192};
193
194static const SSMFIELD s_aPGMFields_Old[] =
195{
196 SSMFIELD_ENTRY( PGMOLD, fMappingsFixed),
197 SSMFIELD_ENTRY_GCPTR( PGMOLD, GCPtrMappingFixed),
198 SSMFIELD_ENTRY( PGMOLD, cbMappingFixed),
199 SSMFIELD_ENTRY( PGMOLD, fA20Enabled),
200 SSMFIELD_ENTRY_GCPHYS( PGMOLD, GCPhysA20Mask),
201 SSMFIELD_ENTRY( PGMOLD, enmGuestMode),
202 SSMFIELD_ENTRY_TERM()
203};
204
205
206/**
207 * Find the ROM tracking structure for the given page.
208 *
209 * @returns Pointer to the ROM page structure, or NULL if GCPhys is not
210 *          within any ROM range (i.e. the caller didn't check that it's a ROM page).
211 * @param pVM The cross context VM structure.
212 * @param GCPhys The address of the ROM page.
213 */
214static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
215{
216 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
217 pRomRange;
218 pRomRange = pRomRange->CTX_SUFF(pNext))
219 {
220 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
221 if (off < pRomRange->cb)
222 return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT];
223 }
224 return NULL;
225}
226
227
228/**
229 * Prepares the ROM pages for a live save.
230 *
231 * @returns VBox status code.
232 * @param pVM The cross context VM structure.
233 */
234static int pgmR3PrepRomPages(PVM pVM)
235{
236 /*
237 * Initialize the live save tracking in the ROM page descriptors.
238 */
239 PGM_LOCK_VOID(pVM);
240 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
241 {
242 PPGMRAMRANGE pRamHint = NULL;
243 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
244
245 for (uint32_t iPage = 0; iPage < cPages; iPage++)
246 {
247 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
248 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
249 pRom->aPages[iPage].LiveSave.fDirty = true;
250 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
251 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
252 {
253 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
254 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
255 else
256 {
257 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
258 PPGMPAGE pPage;
259 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
260 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
261 if (RT_SUCCESS(rc))
262 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
263 else
264 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
265 }
266 }
267 }
268
269 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
270 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
271 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
272 }
273 PGM_UNLOCK(pVM);
274
275 return VINF_SUCCESS;
276}
277
278
279/**
280 * Assigns IDs to the ROM ranges and saves them.
281 *
282 * @returns VBox status code.
283 * @param pVM The cross context VM structure.
284 * @param pSSM Saved state handle.
285 */
286static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
287{
288 PGM_LOCK_VOID(pVM);
289 uint8_t id = 1;
290 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
291 {
292 pRom->idSavedState = id;
293 SSMR3PutU8(pSSM, id);
294 SSMR3PutStrZ(pSSM, ""); /* device name */
295 SSMR3PutU32(pSSM, 0); /* device instance */
296 SSMR3PutU8(pSSM, 0); /* region */
297 SSMR3PutStrZ(pSSM, pRom->pszDesc);
298 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
299 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
300 if (RT_FAILURE(rc))
301 break;
302 }
303 PGM_UNLOCK(pVM);
304 return SSMR3PutU8(pSSM, UINT8_MAX);
305}
306
307
308/**
309 * Loads the ROM range ID assignments.
310 *
311 * @returns VBox status code.
312 *
313 * @param pVM The cross context VM structure.
314 * @param pSSM The saved state handle.
315 */
316static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
317{
318 PGM_LOCK_ASSERT_OWNER(pVM);
319
320 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
321 pRom->idSavedState = UINT8_MAX;
322
323 for (;;)
324 {
325 /*
326 * Read the data.
327 */
328 uint8_t id;
329 int rc = SSMR3GetU8(pSSM, &id);
330 if (RT_FAILURE(rc))
331 return rc;
332 if (id == UINT8_MAX)
333 {
334 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
335 if (pRom->idSavedState != UINT8_MAX)
336 { /* likely */ }
337 else if (pRom->fFlags & PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE)
338 LogRel(("PGM: The '%s' ROM was not found in the saved state, but it is marked as maybe-missing, so that's probably okay.\n",
339 pRom->pszDesc));
340 else
341 AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
342 ("The '%s' ROM was not found in the saved state. Probably due to some misconfiguration\n",
343 pRom->pszDesc));
344 return VINF_SUCCESS; /* the end */
345 }
346 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
347
348 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
349 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
350 AssertLogRelRCReturn(rc, rc);
351
352 uint32_t uInstance;
353 SSMR3GetU32(pSSM, &uInstance);
354 uint8_t iRegion;
355 SSMR3GetU8(pSSM, &iRegion);
356
357 char szDesc[64];
358 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
359 AssertLogRelRCReturn(rc, rc);
360
361 RTGCPHYS GCPhys;
362 SSMR3GetGCPhys(pSSM, &GCPhys);
363 RTGCPHYS cb;
364 rc = SSMR3GetGCPhys(pSSM, &cb);
365 if (RT_FAILURE(rc))
366 return rc;
367 AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
368 AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
369
370 /*
371 * Locate a matching ROM range.
372 */
373 AssertLogRelMsgReturn( uInstance == 0
374 && iRegion == 0
375 && szDevName[0] == '\0',
376 ("GCPhys=%RGp %s\n", GCPhys, szDesc),
377 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
378 PPGMROMRANGE pRom;
379 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
380 {
381 if ( pRom->idSavedState == UINT8_MAX
382 && !strcmp(pRom->pszDesc, szDesc))
383 {
384 pRom->idSavedState = id;
385 break;
386 }
387 }
388 if (!pRom)
389 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
390 } /* forever */
391}
392
393
394/**
395 * Scan ROM pages.
396 *
397 * @param pVM The cross context VM structure.
398 */
399static void pgmR3ScanRomPages(PVM pVM)
400{
401 /*
402 * The shadow ROMs.
403 */
404 PGM_LOCK_VOID(pVM);
405 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
406 {
407 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
408 {
409 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
410 for (uint32_t iPage = 0; iPage < cPages; iPage++)
411 {
412 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
413 if (pRomPage->LiveSave.fWrittenTo)
414 {
415 pRomPage->LiveSave.fWrittenTo = false;
416 if (!pRomPage->LiveSave.fDirty)
417 {
418 pRomPage->LiveSave.fDirty = true;
419 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
420 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
421 }
422 pRomPage->LiveSave.fDirtiedRecently = true;
423 }
424 else
425 pRomPage->LiveSave.fDirtiedRecently = false;
426 }
427 }
428 }
429 PGM_UNLOCK(pVM);
430}
431
432
433/**
434 * Takes care of the virgin ROM pages in the first pass.
435 *
436 * This is an attempt at simplifying the handling of ROM pages a little bit.
437 * This ASSUMES that no new ROM ranges will be added and that they won't be
438 * relinked in any way.
439 *
440 * @param pVM The cross context VM structure.
441 * @param pSSM The SSM handle.
442 * @param fLiveSave Whether we're in a live save or not.
443 */
444static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
445{
446 PGM_LOCK_VOID(pVM);
447 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
448 {
449 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
450 for (uint32_t iPage = 0; iPage < cPages; iPage++)
451 {
452 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
453 PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;
454
455 /* Get the virgin page descriptor. */
456 PPGMPAGE pPage;
457 if (PGMROMPROT_IS_ROM(enmProt))
458 pPage = pgmPhysGetPage(pVM, GCPhys);
459 else
460 pPage = &pRom->aPages[iPage].Virgin;
461
462 /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
463 int rc = VINF_SUCCESS;
464 char abPage[GUEST_PAGE_SIZE];
465 if ( !PGM_PAGE_IS_ZERO(pPage)
466 && !PGM_PAGE_IS_BALLOONED(pPage))
467 {
468 void const *pvPage;
469 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
470 if (RT_SUCCESS(rc))
471 memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
472 }
473 else
474 RT_ZERO(abPage);
475 PGM_UNLOCK(pVM);
476 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
477
478 /* Save it. */
479 if (iPage > 0)
480 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
481 else
482 {
483 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
484 SSMR3PutU8(pSSM, pRom->idSavedState);
485 SSMR3PutU32(pSSM, iPage);
486 }
487 SSMR3PutU8(pSSM, (uint8_t)enmProt);
488 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
489 if (RT_FAILURE(rc))
490 return rc;
491
492 /* Update state. */
493 PGM_LOCK_VOID(pVM);
494 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
495 if (fLiveSave)
496 {
497 pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
498 pVM->pgm.s.LiveSave.Rom.cReadyPages++;
499 pVM->pgm.s.LiveSave.cSavedPages++;
500 }
501 }
502 }
503 PGM_UNLOCK(pVM);
504 return VINF_SUCCESS;
505}
506
507
508/**
509 * Saves dirty pages in the shadowed ROM ranges.
510 *
511 * Used by pgmR3LiveExec and pgmR3SaveExec.
512 *
513 * @returns VBox status code.
514 * @param pVM The cross context VM structure.
515 * @param pSSM The SSM handle.
516 * @param fLiveSave Whether it's a live save or not.
517 * @param fFinalPass Whether this is the final pass or not.
518 */
519static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
520{
521 /*
522 * The Shadowed ROMs.
523 *
524 * ASSUMES that the ROM ranges are fixed.
525 * ASSUMES that all the ROM ranges are mapped.
526 */
527 PGM_LOCK_VOID(pVM);
528 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
529 {
530 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
531 {
532 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
533 uint32_t iPrevPage = cPages;
534 for (uint32_t iPage = 0; iPage < cPages; iPage++)
535 {
536 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
537 if ( !fLiveSave
538 || ( pRomPage->LiveSave.fDirty
539 && ( ( !pRomPage->LiveSave.fDirtiedRecently
540 && !pRomPage->LiveSave.fWrittenTo)
541 || fFinalPass
542 )
543 )
544 )
545 {
546 uint8_t abPage[GUEST_PAGE_SIZE];
547 PGMROMPROT enmProt = pRomPage->enmProt;
548 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
549 PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
550 bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
551 int rc = VINF_SUCCESS;
552 if (!fZero)
553 {
554 void const *pvPage;
555 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
556 if (RT_SUCCESS(rc))
557 memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
558 }
559 if (fLiveSave && RT_SUCCESS(rc))
560 {
561 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
562 pRomPage->LiveSave.fDirty = false;
563 pVM->pgm.s.LiveSave.Rom.cReadyPages++;
564 pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
565 pVM->pgm.s.LiveSave.cSavedPages++;
566 }
567 PGM_UNLOCK(pVM);
568 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
569
570 if (iPage - 1U == iPrevPage && iPage > 0)
571 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
572 else
573 {
574 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
575 SSMR3PutU8(pSSM, pRom->idSavedState);
576 SSMR3PutU32(pSSM, iPage);
577 }
578 rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
579 if (!fZero)
580 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
581 if (RT_FAILURE(rc))
582 return rc;
583
584 PGM_LOCK_VOID(pVM);
585 iPrevPage = iPage;
586 }
587 /*
588 * In the final pass, make sure the protection is in sync.
589 */
590 else if ( fFinalPass
591 && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
592 {
593 PGMROMPROT enmProt = pRomPage->enmProt;
594 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
595 PGM_UNLOCK(pVM);
596
597 if (iPage - 1U == iPrevPage && iPage > 0)
598 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
599 else
600 {
601 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
602 SSMR3PutU8(pSSM, pRom->idSavedState);
603 SSMR3PutU32(pSSM, iPage);
604 }
605 int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
606 if (RT_FAILURE(rc))
607 return rc;
608
609 PGM_LOCK_VOID(pVM);
610 iPrevPage = iPage;
611 }
612 }
613 }
614 }
615 PGM_UNLOCK(pVM);
616 return VINF_SUCCESS;
617}
618
619
620/**
621 * Cleans up ROM pages after a live save.
622 *
623 * @param pVM The cross context VM structure.
624 */
625static void pgmR3DoneRomPages(PVM pVM)
626{
627 NOREF(pVM);
628}
629
630
631/**
632 * Prepares the MMIO2 pages for a live save.
633 *
634 * @returns VBox status code.
635 * @param pVM The cross context VM structure.
636 */
637static int pgmR3PrepMmio2Pages(PVM pVM)
638{
639 /*
640 * Initialize the live save tracking in the MMIO2 ranges.
641 * ASSUME nothing changes here.
642 */
643 PGM_LOCK_VOID(pVM);
644 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
645 {
646 uint32_t const cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
647 PGM_UNLOCK(pVM);
648
649 PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
650 sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
651 if (!paLSPages)
652 return VERR_NO_MEMORY;
653 for (uint32_t iPage = 0; iPage < cPages; iPage++)
654 {
655 /* Initialize it as a dirty zero page. */
656 paLSPages[iPage].fDirty = true;
657 paLSPages[iPage].cUnchangedScans = 0;
658 paLSPages[iPage].fZero = true;
659 paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
660 paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
661 }
662
663 PGM_LOCK_VOID(pVM);
664 pRegMmio->paLSPages = paLSPages;
665 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
666 }
667 PGM_UNLOCK(pVM);
668 return VINF_SUCCESS;
669}
670
671
672/**
673 * Assigns IDs to the MMIO2 ranges and saves them.
674 *
675 * @returns VBox status code.
676 * @param pVM The cross context VM structure.
677 * @param pSSM Saved state handle.
678 */
679static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
680{
681 PGM_LOCK_VOID(pVM);
682 uint8_t id = 1;
683 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
684 {
685 pRegMmio->idSavedState = id;
686 SSMR3PutU8(pSSM, id);
687 SSMR3PutStrZ(pSSM, pRegMmio->pDevInsR3->pReg->szName);
688 SSMR3PutU32(pSSM, pRegMmio->pDevInsR3->iInstance);
689 SSMR3PutU8(pSSM, pRegMmio->iRegion);
690 SSMR3PutStrZ(pSSM, pRegMmio->RamRange.pszDesc);
691 int rc = SSMR3PutGCPhys(pSSM, pRegMmio->RamRange.cb);
692 if (RT_FAILURE(rc))
693 break;
694 id++;
695 }
696 PGM_UNLOCK(pVM);
697 return SSMR3PutU8(pSSM, UINT8_MAX);
698}
699
700
701/**
702 * Loads the MMIO2 range ID assignments.
703 *
704 * @returns VBox status code.
705 *
706 * @param pVM The cross context VM structure.
707 * @param pSSM The saved state handle.
708 */
709static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
710{
711 PGM_LOCK_ASSERT_OWNER(pVM);
712
713 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
714 pRegMmio->idSavedState = UINT8_MAX;
715
716 for (;;)
717 {
718 /*
719 * Read the data.
720 */
721 uint8_t id;
722 int rc = SSMR3GetU8(pSSM, &id);
723 if (RT_FAILURE(rc))
724 return rc;
725 if (id == UINT8_MAX)
726 {
727 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
728 AssertLogRelMsg(pRegMmio->idSavedState != UINT8_MAX, ("%s\n", pRegMmio->RamRange.pszDesc));
729 return VINF_SUCCESS; /* the end */
730 }
731 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
732
733 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
734 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
735 AssertLogRelRCReturn(rc, rc);
736
737 uint32_t uInstance;
738 SSMR3GetU32(pSSM, &uInstance);
739 uint8_t iRegion;
740 SSMR3GetU8(pSSM, &iRegion);
741
742 char szDesc[64];
743 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
744 AssertLogRelRCReturn(rc, rc);
745
746 RTGCPHYS cb;
747 rc = SSMR3GetGCPhys(pSSM, &cb);
748 AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
749
750 /*
751 * Locate a matching MMIO2 range.
752 */
753 PPGMREGMMIO2RANGE pRegMmio;
754 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
755 {
756 if ( pRegMmio->idSavedState == UINT8_MAX
757 && pRegMmio->iRegion == iRegion
758 && pRegMmio->pDevInsR3->iInstance == uInstance
759 && !strcmp(pRegMmio->pDevInsR3->pReg->szName, szDevName))
760 {
761 pRegMmio->idSavedState = id;
762 break;
763 }
764 }
765 if (!pRegMmio)
766 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
767 szDesc, szDevName, uInstance, iRegion);
768
769 /*
770 * Validate the configuration, the size of the MMIO2 region should be
771 * the same.
772 */
773 if (cb != pRegMmio->RamRange.cb)
774 {
775 LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
776 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb));
777 if (cb > pRegMmio->RamRange.cb) /* bad idea? */
778 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
779 pRegMmio->RamRange.pszDesc, cb, pRegMmio->RamRange.cb);
780 }
781 } /* forever */
782}
783
784
785/**
786 * Scans one MMIO2 page.
787 *
788 * @returns True if changed, false if unchanged.
789 *
790 * @param pVM The cross context VM structure.
791 * @param pbPage The page bits.
792 * @param pLSPage The live save tracking structure for the page.
793 *
794 */
795DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
796{
797 /*
798 * Special handling of zero pages.
799 */
800 bool const fZero = pLSPage->fZero;
801 if (fZero)
802 {
803 if (ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
804 {
805 /* Not modified. */
806 if (pLSPage->fDirty)
807 pLSPage->cUnchangedScans++;
808 return false;
809 }
810
811 pLSPage->fZero = false;
812 pLSPage->u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
813 }
814 else
815 {
816 /*
817 * CRC the first half, if it doesn't match the page is dirty and
818 * we won't check the 2nd half (we'll do that next time).
819 */
820 uint32_t u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
821 if (u32CrcH1 == pLSPage->u32CrcH1)
822 {
823 uint32_t u32CrcH2 = RTCrc32(pbPage + GUEST_PAGE_SIZE / 2, GUEST_PAGE_SIZE / 2);
824 if (u32CrcH2 == pLSPage->u32CrcH2)
825 {
826 /* Probably not modified. */
827 if (pLSPage->fDirty)
828 pLSPage->cUnchangedScans++;
829 return false;
830 }
831
832 pLSPage->u32CrcH2 = u32CrcH2;
833 }
834 else
835 {
836 pLSPage->u32CrcH1 = u32CrcH1;
837 if ( u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
838 && ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
839 {
840 pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
841 pLSPage->fZero = true;
842 }
843 }
844 }
845
846 /* dirty page path */
847 pLSPage->cUnchangedScans = 0;
848 if (!pLSPage->fDirty)
849 {
850 pLSPage->fDirty = true;
851 pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
852 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
853 if (fZero)
854 pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
855 }
856 return true;
857}
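
/*
 * The change-detection scheme above in isolation: each scan CRCs the first
 * half of the page and only checks the second half when the first matches,
 * so a page that is actively written to typically costs one half-page CRC
 * per scan. A minimal sketch with hypothetical names (it omits the zero-page
 * fast path), reusing sketchCrc32 from the sketch near the top of the file.
 */
typedef struct SKETCHHALFCRC
{
    uint32_t u32CrcH1;  /* CRC-32 of the first half as of the last scan */
    uint32_t u32CrcH2;  /* CRC-32 of the second half as of the last scan */
} SKETCHHALFCRC;

/* Returns 1 if the page (probably) changed since the last scan, 0 otherwise. */
static int sketchIsPageDirty(SKETCHHALFCRC *pState, const uint8_t *pbPage, size_t cbPage)
{
    uint32_t const u32CrcH1 = sketchCrc32(pbPage, cbPage / 2);
    if (u32CrcH1 != pState->u32CrcH1)
    {
        pState->u32CrcH1 = u32CrcH1;        /* dirty; the 2nd half gets re-checked next scan */
        return 1;
    }
    uint32_t const u32CrcH2 = sketchCrc32(pbPage + cbPage / 2, cbPage - cbPage / 2);
    if (u32CrcH2 != pState->u32CrcH2)
    {
        pState->u32CrcH2 = u32CrcH2;
        return 1;
    }
    return 0;                               /* probably unchanged */
}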
858
859
860/**
861 * Scan for MMIO2 page modifications.
862 *
863 * @param pVM The cross context VM structure.
864 * @param uPass The pass number.
865 */
866static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
867{
868 /*
869 * Since this is a bit expensive we lower the scan rate after a little while.
870 */
871 if ( ( (uPass & 3) != 0
872 && uPass > 10)
873 || uPass == SSM_PASS_FINAL)
874 return;
875
876 PGM_LOCK_VOID(pVM); /* paranoia */
877 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
878 {
879 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
880 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
881 PGM_UNLOCK(pVM);
882
883 for (uint32_t iPage = 0; iPage < cPages; iPage++)
884 {
885 uint8_t const *pbPage = (uint8_t const *)pRegMmio->pvR3 + iPage * GUEST_PAGE_SIZE;
886 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
887 }
888
889 PGM_LOCK_VOID(pVM);
890 }
891 PGM_UNLOCK(pVM);
892
893}
894
895
896/**
897 * Save quiescent MMIO2 pages.
898 *
899 * @returns VBox status code.
900 * @param pVM The cross context VM structure.
901 * @param pSSM The SSM handle.
902 * @param fLiveSave Whether it's a live save or not.
903 * @param uPass The pass number.
904 */
905static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
906{
907 /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
908 * device that we wish to know about changes.) */
909
910 int rc = VINF_SUCCESS;
911 if (uPass == SSM_PASS_FINAL)
912 {
913 /*
914 * The mop up round.
915 */
916 PGM_LOCK_VOID(pVM);
917 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
918 pRegMmio && RT_SUCCESS(rc);
919 pRegMmio = pRegMmio->pNextR3)
920 {
921 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
922 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
923 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
924 uint32_t iPageLast = cPages;
925 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
926 {
927 uint8_t u8Type;
928 if (!fLiveSave)
929 u8Type = ASMMemIsZero(pbPage, GUEST_PAGE_SIZE) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
930 else
931 {
932 /* Try to figure out if it's a clean page; compare the SHA-1 to be really sure. */
933 if ( !paLSPages[iPage].fDirty
934 && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
935 {
936 if (paLSPages[iPage].fZero)
937 continue;
938
939 uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
940 RTSha1(pbPage, GUEST_PAGE_SIZE, abSha1Hash);
941 if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
942 continue;
943 }
944 u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
945 pVM->pgm.s.LiveSave.cSavedPages++;
946 }
947
948 if (iPage != 0 && iPage == iPageLast + 1)
949 rc = SSMR3PutU8(pSSM, u8Type);
950 else
951 {
952 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
953 SSMR3PutU8(pSSM, pRegMmio->idSavedState);
954 rc = SSMR3PutU32(pSSM, iPage);
955 }
956 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
957 rc = SSMR3PutMem(pSSM, pbPage, GUEST_PAGE_SIZE);
958 if (RT_FAILURE(rc))
959 break;
960 iPageLast = iPage;
961 }
962 }
963 PGM_UNLOCK(pVM);
964 }
965 /*
966 * Reduce the rate after a little while since the current MMIO2 approach is
967 * a bit expensive.
968 * We position it two passes after the scan pass to avoid saving busy pages.
969 */
970 else if ( uPass <= 10
971 || (uPass & 3) == 2)
972 {
973 PGM_LOCK_VOID(pVM);
974 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3;
975 pRegMmio && RT_SUCCESS(rc);
976 pRegMmio = pRegMmio->pNextR3)
977 {
978 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio->paLSPages;
979 uint8_t const *pbPage = (uint8_t const *)pRegMmio->RamRange.pvR3;
980 uint32_t cPages = pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT;
981 uint32_t iPageLast = cPages;
982 PGM_UNLOCK(pVM);
983
984 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
985 {
986 /* Skip clean pages and pages which haven't quiesced. */
987 if (!paLSPages[iPage].fDirty)
988 continue;
989 if (paLSPages[iPage].cUnchangedScans < 3)
990 continue;
991 if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
992 continue;
993
994 /* Save it. */
995 bool const fZero = paLSPages[iPage].fZero;
996 uint8_t abPage[GUEST_PAGE_SIZE];
997 if (!fZero)
998 {
999 memcpy(abPage, pbPage, GUEST_PAGE_SIZE);
1000 RTSha1(abPage, GUEST_PAGE_SIZE, paLSPages[iPage].abSha1Saved);
1001 }
1002
1003 uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
1004 if (iPage != 0 && iPage == iPageLast + 1)
1005 rc = SSMR3PutU8(pSSM, u8Type);
1006 else
1007 {
1008 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
1009 SSMR3PutU8(pSSM, pRegMmio->idSavedState);
1010 rc = SSMR3PutU32(pSSM, iPage);
1011 }
1012 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
1013 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
1014 if (RT_FAILURE(rc))
1015 break;
1016
1017 /* Housekeeping. */
1018 paLSPages[iPage].fDirty = false;
1019 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
1020 pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
1021 if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
1022 pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
1023 pVM->pgm.s.LiveSave.cSavedPages++;
1024 iPageLast = iPage;
1025 }
1026
1027 PGM_LOCK_VOID(pVM);
1028 }
1029 PGM_UNLOCK(pVM);
1030 }
1031
1032 return rc;
1033}
1034
1035
1036/**
1037 * Cleans up MMIO2 pages after a live save.
1038 *
1039 * @param pVM The cross context VM structure.
1040 */
1041static void pgmR3DoneMmio2Pages(PVM pVM)
1042{
1043 /*
1044 * Free the tracking structures for the MMIO2 pages.
1045 * We do the freeing outside the lock in case the VM is running.
1046 */
1047 PGM_LOCK_VOID(pVM);
1048 for (PPGMREGMMIO2RANGE pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
1049 {
1050 void *pvMmio2ToFree = pRegMmio->paLSPages;
1051 if (pvMmio2ToFree)
1052 {
1053 pRegMmio->paLSPages = NULL;
1054 PGM_UNLOCK(pVM);
1055 MMR3HeapFree(pvMmio2ToFree);
1056 PGM_LOCK_VOID(pVM);
1057 }
1058 }
1059 PGM_UNLOCK(pVM);
1060}
1061
1062
1063/**
1064 * Prepares the RAM pages for a live save.
1065 *
1066 * @returns VBox status code.
1067 * @param pVM The cross context VM structure.
1068 */
1069static int pgmR3PrepRamPages(PVM pVM)
1070{
1071
1072 /*
1073 * Try allocating tracking structures for the ram ranges.
1074 *
1075 * To avoid lock contention, we leave the lock every time we're allocating
1076 * a new array. This means we'll have to ditch the allocation and start
1077 * all over again if the RAM range list changes in-between.
1078 *
1079 * Note! pgmR3SaveDone will always be called and it is therefore responsible
1080 * for cleaning up.
1081 */
1082 PPGMRAMRANGE pCur;
1083 PGM_LOCK_VOID(pVM);
1084 do
1085 {
1086 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1087 {
1088 if ( !pCur->paLSPages
1089 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1090 {
1091 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1092 uint32_t const cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1093 PGM_UNLOCK(pVM);
1094 PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
1095 if (!paLSPages)
1096 return VERR_NO_MEMORY;
1097 PGM_LOCK_VOID(pVM);
1098 if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1099 {
1100 PGM_UNLOCK(pVM);
1101 MMR3HeapFree(paLSPages);
1102 PGM_LOCK_VOID(pVM);
1103 break; /* try again */
1104 }
1105 pCur->paLSPages = paLSPages;
1106
1107 /*
1108 * Initialize the array.
1109 */
1110 uint32_t iPage = cPages;
1111 while (iPage-- > 0)
1112 {
1113 /** @todo yield critsect! (after moving this away from EMT0) */
1114 PCPGMPAGE pPage = &pCur->aPages[iPage];
1115 paLSPages[iPage].cDirtied = 0;
1116 paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
1117 paLSPages[iPage].fWriteMonitored = 0;
1118 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1119 paLSPages[iPage].u2Reserved = 0;
1120 switch (PGM_PAGE_GET_TYPE(pPage))
1121 {
1122 case PGMPAGETYPE_RAM:
1123 if ( PGM_PAGE_IS_ZERO(pPage)
1124 || PGM_PAGE_IS_BALLOONED(pPage))
1125 {
1126 paLSPages[iPage].fZero = 1;
1127 paLSPages[iPage].fShared = 0;
1128#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1129 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1130#endif
1131 }
1132 else if (PGM_PAGE_IS_SHARED(pPage))
1133 {
1134 paLSPages[iPage].fZero = 0;
1135 paLSPages[iPage].fShared = 1;
1136#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1137 paLSPages[iPage].u32Crc = UINT32_MAX;
1138#endif
1139 }
1140 else
1141 {
1142 paLSPages[iPage].fZero = 0;
1143 paLSPages[iPage].fShared = 0;
1144#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1145 paLSPages[iPage].u32Crc = UINT32_MAX;
1146#endif
1147 }
1148 paLSPages[iPage].fIgnore = 0;
1149 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1150 break;
1151
1152 case PGMPAGETYPE_ROM_SHADOW:
1153 case PGMPAGETYPE_ROM:
1154 {
1155 paLSPages[iPage].fZero = 0;
1156 paLSPages[iPage].fShared = 0;
1157 paLSPages[iPage].fDirty = 0;
1158 paLSPages[iPage].fIgnore = 1;
1159#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1160 paLSPages[iPage].u32Crc = UINT32_MAX;
1161#endif
1162 pVM->pgm.s.LiveSave.cIgnoredPages++;
1163 break;
1164 }
1165
1166 default:
1167 AssertMsgFailed(("%R[pgmpage]", pPage));
1168 RT_FALL_THRU();
1169 case PGMPAGETYPE_MMIO2:
1170 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
1171 paLSPages[iPage].fZero = 0;
1172 paLSPages[iPage].fShared = 0;
1173 paLSPages[iPage].fDirty = 0;
1174 paLSPages[iPage].fIgnore = 1;
1175#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1176 paLSPages[iPage].u32Crc = UINT32_MAX;
1177#endif
1178 pVM->pgm.s.LiveSave.cIgnoredPages++;
1179 break;
1180
1181 case PGMPAGETYPE_MMIO:
1182 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
1183 paLSPages[iPage].fZero = 0;
1184 paLSPages[iPage].fShared = 0;
1185 paLSPages[iPage].fDirty = 0;
1186 paLSPages[iPage].fIgnore = 1;
1187#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1188 paLSPages[iPage].u32Crc = UINT32_MAX;
1189#endif
1190 pVM->pgm.s.LiveSave.cIgnoredPages++;
1191 break;
1192 }
1193 }
1194 }
1195 }
1196 } while (pCur);
1197 PGM_UNLOCK(pVM);
1198
1199 return VINF_SUCCESS;
1200}
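
/*
 * The allocate-outside-the-lock retry pattern used by pgmR3PrepRamPages
 * above, in isolation: sample the generation counter under the lock, drop
 * the lock around the (potentially slow) allocation, and ditch the result
 * and restart if the counter moved. The lock stubs and the SKETCHLIST type
 * are hypothetical stand-ins for PGM_LOCK_VOID/PGM_UNLOCK, MMR3HeapAllocZ
 * and the RAM range list.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct SKETCHLIST
{
    uint32_t idGen;     /* bumped whenever the list is modified */
    void    *pvUser;    /* the per-range tracking array to allocate */
} SKETCHLIST;

static void sketchLock(SKETCHLIST *pList)   { (void)pList; /* take the real lock here */ }
static void sketchUnlock(SKETCHLIST *pList) { (void)pList; /* ... and release it here */ }

/* Called and returns with the lock held: 0 = done, 1 = restart the walk, -1 = no memory. */
static int sketchAllocTrackingArray(SKETCHLIST *pList, size_t cb)
{
    uint32_t const idGen = pList->idGen;
    sketchUnlock(pList);
    void *pv = calloc(1, cb);
    sketchLock(pList);
    if (!pv)
        return -1;
    if (pList->idGen != idGen)  /* list changed while unlocked: ditch and start over */
    {
        sketchUnlock(pList);
        free(pv);
        sketchLock(pList);
        return 1;
    }
    pList->pvUser = pv;
    return 0;
}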
1201
1202
1203/**
1204 * Saves the RAM configuration.
1205 *
1206 * @returns VBox status code.
1207 * @param pVM The cross context VM structure.
1208 * @param pSSM The saved state handle.
1209 */
1210static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
1211{
1212 uint32_t cbRamHole = 0;
1213 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
1214 AssertRCReturn(rc, rc);
1215
1216 uint64_t cbRam = 0;
1217 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
1218 AssertRCReturn(rc, rc);
1219
1220 SSMR3PutU32(pSSM, cbRamHole);
1221 return SSMR3PutU64(pSSM, cbRam);
1222}
1223
1224
1225/**
1226 * Loads and verifies the RAM configuration.
1227 *
1228 * @returns VBox status code.
1229 * @param pVM The cross context VM structure.
1230 * @param pSSM The saved state handle.
1231 */
1232static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
1233{
1234 uint32_t cbRamHoleCfg = 0;
1235 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
1236 AssertRCReturn(rc, rc);
1237
1238 uint64_t cbRamCfg = 0;
1239 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
1240 AssertRCReturn(rc, rc);
1241
1242 uint32_t cbRamHoleSaved;
1243 SSMR3GetU32(pSSM, &cbRamHoleSaved);
1244
1245 uint64_t cbRamSaved;
1246 rc = SSMR3GetU64(pSSM, &cbRamSaved);
1247 AssertRCReturn(rc, rc);
1248
1249 if ( cbRamHoleCfg != cbRamHoleSaved
1250 || cbRamCfg != cbRamSaved)
1251 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
1252 cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
1253 return VINF_SUCCESS;
1254}
1255
1256#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1257
1258/**
1259 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1260 * info with it.
1261 *
1262 * @param pVM The cross context VM structure.
1263 * @param pCur The current RAM range.
1264 * @param paLSPages The current array of live save page tracking
1265 * structures.
1266 * @param iPage The page index.
1267 */
1268static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1269{
1270 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1271 PGMPAGEMAPLOCK PgMpLck;
1272 void const *pvPage;
1273 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1274 if (RT_SUCCESS(rc))
1275 {
1276 paLSPages[iPage].u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
1277 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1278 }
1279 else
1280 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1281}
1282
1283
1284/**
1285 * Verifies the CRC-32 for a page given its raw bits.
1286 *
1287 * @param pvPage The page bits.
1288 * @param pCur The current RAM range.
1289 * @param paLSPages The current array of live save page tracking
1290 * structures.
1291 * @param iPage The page index.
1292 */
1293static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1294{
1295 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1296 {
1297 uint32_t u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
1298 Assert( ( !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
1299 && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
1300 || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
1301 AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
1302 ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
1303 pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
1304 }
1305}
1306
1307
1308/**
1309 * Verifies the CRC-32 for a RAM page.
1310 *
1311 * @param pVM The cross context VM structure.
1312 * @param pCur The current RAM range.
1313 * @param paLSPages The current array of live save page tracking
1314 * structures.
1315 * @param iPage The page index.
1316 */
1317static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
1318{
1319 if (paLSPages[iPage].u32Crc != UINT32_MAX)
1320 {
1321 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1322 PGMPAGEMAPLOCK PgMpLck;
1323 void const *pvPage;
1324 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1325 if (RT_SUCCESS(rc))
1326 {
1327 pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
1328 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1329 }
1330 }
1331}
1332
1333#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1334
1335/**
1336 * Scan for RAM page modifications and reprotect them.
1337 *
1338 * @param pVM The cross context VM structure.
1339 * @param fFinalPass Whether this is the final pass or not.
1340 */
1341static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
1342{
1343 /*
1344 * The RAM.
1345 */
1346 RTGCPHYS GCPhysCur = 0;
1347 PPGMRAMRANGE pCur;
1348 PGM_LOCK_VOID(pVM);
1349 do
1350 {
1351 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1352 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1353 {
1354 if ( pCur->GCPhysLast > GCPhysCur
1355 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1356 {
1357 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1358 uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1359 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
1360 GCPhysCur = 0;
1361 for (; iPage < cPages; iPage++)
1362 {
1363 /* Do yield first. */
1364 if ( !fFinalPass
1365#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
1366 && (iPage & 0x7ff) == 0x100
1367#endif
1368 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1369 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1370 {
1371 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1372 break; /* restart */
1373 }
1374
1375 /* Skip already ignored pages. */
1376 if (paLSPages[iPage].fIgnore)
1377 continue;
1378
1379 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1380 {
1381 /*
1382 * A RAM page.
1383 */
1384 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1385 {
1386 case PGM_PAGE_STATE_ALLOCATED:
1387 /** @todo Optimize this: Don't always re-enable write
1388 * monitoring if the page is known to be very busy. */
1389 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1390 {
1391 AssertMsg(paLSPages[iPage].fWriteMonitored,
1392 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
1393 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1394 Assert(pVM->pgm.s.cWrittenToPages > 0);
1395 pVM->pgm.s.cWrittenToPages--;
1396 }
1397 else
1398 {
1399 AssertMsg(!paLSPages[iPage].fWriteMonitored,
1400 ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
1401 pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
1402 }
1403
1404 if (!paLSPages[iPage].fDirty)
1405 {
1406 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1407 if (paLSPages[iPage].fZero)
1408 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1409 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1410 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1411 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1412 }
1413
1414 pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
1415 pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
1416 paLSPages[iPage].fWriteMonitored = 1;
1417 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1418 paLSPages[iPage].fDirty = 1;
1419 paLSPages[iPage].fZero = 0;
1420 paLSPages[iPage].fShared = 0;
1421#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1422 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1423#endif
1424 break;
1425
1426 case PGM_PAGE_STATE_WRITE_MONITORED:
1427 Assert(paLSPages[iPage].fWriteMonitored);
1428 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1429 {
1430#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1431 if (paLSPages[iPage].fWriteMonitoredJustNow)
1432 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1433 else
1434 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
1435#endif
1436 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1437 }
1438 else
1439 {
1440 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1441#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1442 paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
1443#endif
1444 if (!paLSPages[iPage].fDirty)
1445 {
1446 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1447 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1448 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1449 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1450 }
1451 }
1452 break;
1453
1454 case PGM_PAGE_STATE_ZERO:
1455 case PGM_PAGE_STATE_BALLOONED:
1456 if (!paLSPages[iPage].fZero)
1457 {
1458 if (!paLSPages[iPage].fDirty)
1459 {
1460 paLSPages[iPage].fDirty = 1;
1461 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1462 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1463 }
1464 paLSPages[iPage].fZero = 1;
1465 paLSPages[iPage].fShared = 0;
1466#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1467 paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
1468#endif
1469 }
1470 break;
1471
1472 case PGM_PAGE_STATE_SHARED:
1473 if (!paLSPages[iPage].fShared)
1474 {
1475 if (!paLSPages[iPage].fDirty)
1476 {
1477 paLSPages[iPage].fDirty = 1;
1478 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1479 if (paLSPages[iPage].fZero)
1480 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1481 pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
1482 }
1483 paLSPages[iPage].fZero = 0;
1484 paLSPages[iPage].fShared = 1;
1485#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1486 pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
1487#endif
1488 }
1489 break;
1490 }
1491 }
1492 else
1493 {
1494 /*
1495 * All other types => Ignore the page.
1496 */
1497 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1498 paLSPages[iPage].fIgnore = 1;
1499 if (paLSPages[iPage].fWriteMonitored)
1500 {
1501 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1502 * pages! */
1503 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1504 {
1505 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1506 PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1507 Assert(pVM->pgm.s.cMonitoredPages > 0);
1508 pVM->pgm.s.cMonitoredPages--;
1509 }
1510 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1511 {
1512 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
1513 Assert(pVM->pgm.s.cWrittenToPages > 0);
1514 pVM->pgm.s.cWrittenToPages--;
1515 }
1516 pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
1517 }
1518
1519 /** @todo the counting doesn't quite work out here. fix later? */
1520 if (paLSPages[iPage].fDirty)
1521 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1522 else
1523 {
1524 pVM->pgm.s.LiveSave.Ram.cReadyPages--;
1525 if (paLSPages[iPage].fZero)
1526 pVM->pgm.s.LiveSave.Ram.cZeroPages--;
1527 }
1528 pVM->pgm.s.LiveSave.cIgnoredPages++;
1529 }
1530 } /* for each page in range */
1531
1532 if (GCPhysCur != 0)
1533 break; /* Yield + ramrange change */
1534 GCPhysCur = pCur->GCPhysLast;
1535 }
1536 } /* for each range */
1537 } while (pCur);
1538 PGM_UNLOCK(pVM);
1539}
1540
1541
1542/**
1543 * Save quiescent RAM pages.
1544 *
1545 * @returns VBox status code.
1546 * @param pVM The cross context VM structure.
1547 * @param pSSM The SSM handle.
1548 * @param fLiveSave Whether it's a live save or not.
1549 * @param uPass The pass number.
1550 */
1551static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1552{
1553 NOREF(fLiveSave);
1554
1555 /*
1556 * The RAM.
1557 */
1558 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1559 RTGCPHYS GCPhysCur = 0;
1560 PPGMRAMRANGE pCur;
1561
1562 PGM_LOCK_VOID(pVM);
1563 do
1564 {
1565 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1566 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1567 {
1568 if ( pCur->GCPhysLast > GCPhysCur
1569 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1570 {
1571 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1572 uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1573 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
1574 GCPhysCur = 0;
1575 for (; iPage < cPages; iPage++)
1576 {
1577 /* Do yield first. */
1578 if ( uPass != SSM_PASS_FINAL
1579 && (iPage & 0x7ff) == 0x100
1580 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1581 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1582 {
1583 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1584 break; /* restart */
1585 }
1586
1587 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1588
1589 /*
1590 * Only save pages that haven't changed since last scan and are dirty.
1591 */
1592 if ( uPass != SSM_PASS_FINAL
1593 && paLSPages)
1594 {
1595 if (!paLSPages[iPage].fDirty)
1596 continue;
1597 if (paLSPages[iPage].fWriteMonitoredJustNow)
1598 continue;
1599 if (paLSPages[iPage].fIgnore)
1600 continue;
1601 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1602 continue;
1603 if ( PGM_PAGE_GET_STATE(pCurPage)
1604 != ( paLSPages[iPage].fZero
1605 ? PGM_PAGE_STATE_ZERO
1606 : paLSPages[iPage].fShared
1607 ? PGM_PAGE_STATE_SHARED
1608 : PGM_PAGE_STATE_WRITE_MONITORED))
1609 continue;
1610 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1611 continue;
1612 }
1613 else
1614 {
1615 if ( paLSPages
1616 && !paLSPages[iPage].fDirty
1617 && !paLSPages[iPage].fIgnore)
1618 {
1619#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1620 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1621 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1622#endif
1623 continue;
1624 }
1625 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1626 continue;
1627 }
1628
1629 /*
1630 * Do the saving outside the PGM critsect since SSM may block on I/O.
1631 */
1632 int rc;
1633 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1634 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1635 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1636 bool fSkipped = false;
1637
1638 if (!fZero && !fBallooned)
1639 {
1640 /*
1641 * Copy the page and then save it outside the lock (since any
1642 * SSM call may block).
1643 */
1644 uint8_t abPage[GUEST_PAGE_SIZE];
1645 PGMPAGEMAPLOCK PgMpLck;
1646 void const *pvPage;
1647 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1648 if (RT_SUCCESS(rc))
1649 {
1650 memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
1651#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1652 if (paLSPages)
1653 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1654#endif
1655 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1656 }
1657 PGM_UNLOCK(pVM);
1658 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1659
1660 /* Try save some memory when restoring. (Check the stack copy; pvPage is no longer valid after the mapping lock was released above.) */
1661 if (!ASMMemIsZero(abPage, GUEST_PAGE_SIZE))
1662 {
1663 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1664 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1665 else
1666 {
1667 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1668 SSMR3PutGCPhys(pSSM, GCPhys);
1669 }
1670 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
1671 }
1672 else
1673 {
1674 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1675 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1676 else
1677 {
1678 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1679 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1680 }
1681 }
1682 }
1683 else
1684 {
1685 /*
1686 * Dirty zero or ballooned page.
1687 */
1688#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1689 if (paLSPages)
1690 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1691#endif
1692 PGM_UNLOCK(pVM);
1693
1694 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1695 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1696 rc = SSMR3PutU8(pSSM, u8RecType);
1697 else
1698 {
1699 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1700 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1701 }
1702 }
1703 if (RT_FAILURE(rc))
1704 return rc;
1705
1706 PGM_LOCK_VOID(pVM);
1707 if (!fSkipped)
1708 GCPhysLast = GCPhys;
1709 if (paLSPages)
1710 {
1711 paLSPages[iPage].fDirty = 0;
1712 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1713 if (fZero)
1714 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1715 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1716 pVM->pgm.s.LiveSave.cSavedPages++;
1717 }
1718 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1719 {
1720 GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK;
1721 break; /* restart */
1722 }
1723
1724 } /* for each page in range */
1725
1726 if (GCPhysCur != 0)
1727 break; /* Yield + ramrange change */
1728 GCPhysCur = pCur->GCPhysLast;
1729 }
1730 } /* for each range */
1731 } while (pCur);
1732
1733 PGM_UNLOCK(pVM);
1734
1735 return VINF_SUCCESS;
1736}
1737
1738
1739/**
1740 * Cleans up RAM pages after a live save.
1741 *
1742 * @param pVM The cross context VM structure.
1743 */
1744static void pgmR3DoneRamPages(PVM pVM)
1745{
1746 /*
1747 * Free the tracking arrays and disable write monitoring.
1748 *
1749 * Play nice with the PGM lock in case we're called while the VM is still
1750 * running. This means we have to delay the freeing since we wish to use
1751 * paLSPages as an indicator of which RAM ranges which we need to scan for
1752 * write monitored pages.
1753 */
1754 void *pvToFree = NULL;
1755 PPGMRAMRANGE pCur;
1756 uint32_t cMonitoredPages = 0;
1757 PGM_LOCK_VOID(pVM);
1758 do
1759 {
1760 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1761 {
1762 if (pCur->paLSPages)
1763 {
1764 if (pvToFree)
1765 {
1766 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1767 PGM_UNLOCK(pVM);
1768 MMR3HeapFree(pvToFree);
1769 pvToFree = NULL;
1770 PGM_LOCK_VOID(pVM);
1771 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1772 break; /* start over again. */
1773 }
1774
1775 pvToFree = pCur->paLSPages;
1776 pCur->paLSPages = NULL;
1777
1778 uint32_t iPage = pCur->cb >> GUEST_PAGE_SHIFT;
1779 while (iPage--)
1780 {
1781 PPGMPAGE pPage = &pCur->aPages[iPage];
1782 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1783 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1784 {
1785 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1786 cMonitoredPages++;
1787 }
1788 }
1789 }
1790 }
1791 } while (pCur);
1792
1793 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1794 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1795 pVM->pgm.s.cMonitoredPages = 0;
1796 else
1797 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1798
1799 PGM_UNLOCK(pVM);
1800
1801 MMR3HeapFree(pvToFree);
1802 pvToFree = NULL;
1803}
1804
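/*
 * Illustrative sketch (not compiled): the deferred-free idiom used by
 * pgmR3DoneRamPages above.  The heap free is done only while the PGM lock
 * is dropped, and the RAM-range generation counter reveals whether the
 * range list changed in the meantime, in which case the walk restarts.
 */
#if 0
    uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;    /* snapshot the generation */
    PGM_UNLOCK(pVM);
    MMR3HeapFree(pvToFree);                                 /* never free while holding the lock */
    PGM_LOCK_VOID(pVM);
    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
    {   /* the range list changed while unlocked - restart the enumeration */ }
#endif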
1805
1806/**
1807 * @callback_method_impl{FNSSMINTLIVEEXEC}
1808 */
1809static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1810{
1811 int rc;
1812
1813 /*
1814 * Save the MMIO2 and ROM range IDs in pass 0.
1815 */
1816 if (uPass == 0)
1817 {
1818 rc = pgmR3SaveRamConfig(pVM, pSSM);
1819 if (RT_FAILURE(rc))
1820 return rc;
1821 rc = pgmR3SaveRomRanges(pVM, pSSM);
1822 if (RT_FAILURE(rc))
1823 return rc;
1824 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1825 if (RT_FAILURE(rc))
1826 return rc;
1827 }
1828 /*
1829 * Reset the page-per-second estimate to avoid inflation by the initial
1830 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1831 */
1832 else if (uPass == 7)
1833 {
1834 pVM->pgm.s.LiveSave.cSavedPages = 0;
1835 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1836 }
1837
1838 /*
1839 * Do the scanning.
1840 */
1841 pgmR3ScanRomPages(pVM);
1842 pgmR3ScanMmio2Pages(pVM, uPass);
1843 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1844 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1845
1846 /*
1847 * Save the pages.
1848 */
1849 if (uPass == 0)
1850 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1851 else
1852 rc = VINF_SUCCESS;
1853 if (RT_SUCCESS(rc))
1854 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1855 if (RT_SUCCESS(rc))
1856 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1857 if (RT_SUCCESS(rc))
1858 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1859 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1860
1861 return rc;
1862}
1863
1864
1865/**
1866 * @callback_method_impl{FNSSMINTLIVEVOTE}
1867 */
1868static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1869{
1870 /*
1871 * Update and calculate parameters used in the decision making.
1872 */
1873 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1874
1875 /* update history. */
1876 PGM_LOCK_VOID(pVM);
1877 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1878 PGM_UNLOCK(pVM);
1879 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1880 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1881 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1882 + cWrittenToPages;
1883 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1884 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1885 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1886
1887 /* calc short-term average (4 passes). */
1888 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1889 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1890 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1891 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1892 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1893 uint32_t const cDirtyPagesShort = cTotal / 4;
1894 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1895
1896 /* calc long-term average. */
1897 cTotal = 0;
1898 if (uPass < cHistoryEntries)
1899 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1900 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1901 else
1902 for (i = 0; i < cHistoryEntries; i++)
1903 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1904 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
1905 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
1906
1907 /* estimate the speed */
1908 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
1909 uint32_t cPagesPerSecond = (uint32_t)( (long double)pVM->pgm.s.LiveSave.cSavedPages
1910 / ((long double)cNsElapsed / 1000000000.0) );
1911 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
1912
1913 /*
1914 * Try to make a decision.
1915 */
1916 if ( cDirtyPagesShort <= cDirtyPagesLong
1917 && ( cDirtyNow <= cDirtyPagesShort
1918 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
1919 )
1920 )
1921 {
1922 if (uPass > 10)
1923 {
1924 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
1925 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
1926 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
1927 if (cMsMaxDowntime < 32)
1928 cMsMaxDowntime = 32;
1929 if ( ( cMsLeftLong <= cMsMaxDowntime
1930 && cMsLeftShort < cMsMaxDowntime)
1931 || cMsLeftShort < cMsMaxDowntime / 2
1932 )
1933 {
1934 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
1935 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
1936 return VINF_SUCCESS;
1937 }
1938 }
1939 else
1940 {
1941 if ( ( cDirtyPagesShort <= 128
1942 && cDirtyPagesLong <= 1024)
1943 || cDirtyPagesLong <= 256
1944 )
1945 {
1946 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
1947 return VINF_SUCCESS;
1948 }
1949 }
1950 }
1951
1952 /*
1953 * Come up with a completion percentage. Currently this is a simple
1954 * dirty page (long term) vs. total pages ratio + some pass trickery.
1955 */
1956 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong
1957 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
1958 if (uPctDirty <= 100)
1959 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
1960 else
1961 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
1962 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
1963
1964 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1965}
1966
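/*
 * Worked example for the vote above (hypothetical numbers): with
 * cDirtyPagesShort=1200, cDirtyPagesLong=2000 and cPagesPerSecond=40000,
 * the remaining transfer estimates are
 *      cMsLeftShort = 1200 / 40000 * 1000 = 30 ms
 *      cMsLeftLong  = 2000 / 40000 * 1000 = 50 ms
 * Assuming cMsMaxDowntime=100, uPass > 10 and the outer trend checks pass
 * (short-term average not above the long-term one, no dirty-count spike),
 * both estimates fit within the downtime budget and the vote returns
 * VINF_SUCCESS; otherwise VINF_SSM_VOTE_FOR_ANOTHER_PASS requests another
 * live pass.
 */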
1967
1968/**
1969 * @callback_method_impl{FNSSMINTLIVEPREP}
1970 *
1971 * This will attempt to allocate and initialize the tracking structures. It
1972 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
1973 * pgmR3SaveDone will do the cleanups.
1974 */
1975static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
1976{
1977 /*
1978 * Indicate that we will be using the write monitoring.
1979 */
1980 PGM_LOCK_VOID(pVM);
1981 /** @todo find a way of mediating this when more users are added. */
1982 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
1983 {
1984 PGM_UNLOCK(pVM);
1985 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
1986 }
1987 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
1988 PGM_UNLOCK(pVM);
1989
1990 /*
1991 * Initialize the statistics.
1992 */
1993 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
1994 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
1995 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
1996 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
1997 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
1998 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
1999 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2000 pVM->pgm.s.LiveSave.fActive = true;
2001 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2002 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2003 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2004 pVM->pgm.s.LiveSave.cSavedPages = 0;
2005 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2006 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2007
2008 /*
2009 * Per page type.
2010 */
2011 int rc = pgmR3PrepRomPages(pVM);
2012 if (RT_SUCCESS(rc))
2013 rc = pgmR3PrepMmio2Pages(pVM);
2014 if (RT_SUCCESS(rc))
2015 rc = pgmR3PrepRamPages(pVM);
2016
2017 NOREF(pSSM);
2018 return rc;
2019}
2020
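/*
 * Note (informational): seeding acDirtyPagesHistory with UINT32_MAX / 2
 * inflates the early long-term average, so pgmR3LiveVote cannot conclude
 * the live phase before enough real dirty-page samples have accumulated.
 */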
2021
2022/**
2023 * @callback_method_impl{FNSSMINTSAVEEXEC}
2024 */
2025static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2026{
2027 PPGM pPGM = &pVM->pgm.s;
2028
2029 /*
2030 * Lock PGM and set the no-more-writes indicator.
2031 */
2032 PGM_LOCK_VOID(pVM);
2033 pVM->pgm.s.fNoMorePhysWrites = true;
2034
2035 /*
2036 * Save basic data (required / unaffected by relocation).
2037 */
2038 int rc = SSMR3PutStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2039
2040 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
2041 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2042
2043 /*
2044 * Save the (remainder of the) memory.
2045 */
2046 if (RT_SUCCESS(rc))
2047 {
2048 if (pVM->pgm.s.LiveSave.fActive)
2049 {
2050 pgmR3ScanRomPages(pVM);
2051 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2052 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2053
2054 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2055 if (RT_SUCCESS(rc))
2056 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2057 if (RT_SUCCESS(rc))
2058 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2059 }
2060 else
2061 {
2062 rc = pgmR3SaveRamConfig(pVM, pSSM);
2063 if (RT_SUCCESS(rc))
2064 rc = pgmR3SaveRomRanges(pVM, pSSM);
2065 if (RT_SUCCESS(rc))
2066 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2067 if (RT_SUCCESS(rc))
2068 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2069 if (RT_SUCCESS(rc))
2070 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2071 if (RT_SUCCESS(rc))
2072 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2073 if (RT_SUCCESS(rc))
2074 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2075 }
2076 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
2077 }
2078
2079 PGM_UNLOCK(pVM);
2080 return rc;
2081}
2082
2083
2084/**
2085 * @callback_method_impl{FNSSMINTSAVEDONE}
2086 */
2087static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2088{
2089 /*
2090 * Do per page type cleanups first.
2091 */
2092 if (pVM->pgm.s.LiveSave.fActive)
2093 {
2094 pgmR3DoneRomPages(pVM);
2095 pgmR3DoneMmio2Pages(pVM);
2096 pgmR3DoneRamPages(pVM);
2097 }
2098
2099 /*
2100 * Clear the live save indicator and disengage write monitoring.
2101 */
2102 PGM_LOCK_VOID(pVM);
2103 pVM->pgm.s.LiveSave.fActive = false;
2104 /** @todo this is blindly assuming that we're the only user of write
2105 * monitoring. Fix this when more users are added. */
2106 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2107 PGM_UNLOCK(pVM);
2108
2109 NOREF(pSSM);
2110 return VINF_SUCCESS;
2111}
2112
2113
2114/**
2115 * @callback_method_impl{FNSSMINTLOADPREP}
2116 */
2117static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2118{
2119 /*
2120 * Call the reset function to make sure all the memory is cleared.
2121 */
2122 PGMR3Reset(pVM);
2123 pVM->pgm.s.LiveSave.fActive = false;
2124 NOREF(pSSM);
2125 return VINF_SUCCESS;
2126}
2127
2128
2129/**
2130 * Load an ignored page.
2131 *
2132 * @returns VBox status code.
2133 * @param pSSM The saved state handle.
2134 */
2135static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2136{
2137 uint8_t abPage[GUEST_PAGE_SIZE];
2138 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2139}
2140
2141
2142/**
2143 * Compares a page with an old save type value.
2144 *
2145 * @returns true if equal, false if not.
2146 * @param pPage The page to compare.
2147 * @param uOldType The old type value from the saved state.
2148 */
2149DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2150{
2151 uint8_t uOldPageType;
2152 switch (PGM_PAGE_GET_TYPE(pPage))
2153 {
2154 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2155 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2156 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2157 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2158 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2159 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2160 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2161 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2162 default:
2163 AssertFailed();
2164 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2165 break;
2166 }
2167 return uOldPageType == uOldType;
2168}
2169
2170
2171/**
2172 * Loads a page without any bits in the saved state, i.e. making sure it's
2173 * really zero.
2174 *
2175 * @returns VBox status code.
2176 * @param pVM The cross context VM structure.
2177 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2178 * state).
2179 * @param pPage The guest page tracking structure.
2180 * @param GCPhys The page address.
2181 * @param pRam The ram range (logging).
2182 */
2183static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2184{
2185 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2186 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2187 return VERR_SSM_UNEXPECTED_DATA;
2188
2189 /* I think this should be sufficient. */
2190 if ( !PGM_PAGE_IS_ZERO(pPage)
2191 && !PGM_PAGE_IS_BALLOONED(pPage))
2192 return VERR_SSM_UNEXPECTED_DATA;
2193
2194 NOREF(pVM);
2195 NOREF(GCPhys);
2196 NOREF(pRam);
2197 return VINF_SUCCESS;
2198}
2199
2200
2201/**
2202 * Loads a page from the saved state.
2203 *
2204 * @returns VBox status code.
2205 * @param pVM The cross context VM structure.
2206 * @param pSSM The SSM handle.
2207 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2208 * state).
2209 * @param pPage The guest page tracking structure.
2210 * @param GCPhys The page address.
2211 * @param pRam The ram range (logging).
2212 */
2213static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2214{
2215 /*
2216 * Match up the type, dealing with MMIO2 aliases (dropped).
2217 */
2218 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2219 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2220 /* kludge for the expanded PXE BIOS (r67885) - @bugref{5687}: */
2221 || ( uOldType == PGMPAGETYPE_OLD_RAM
2222 && GCPhys >= 0xed000
2223 && GCPhys <= 0xeffff
2224 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2225 ,
2226 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2227 VERR_SSM_UNEXPECTED_DATA);
2228
2229 /*
2230 * Load the page.
2231 */
2232 PGMPAGEMAPLOCK PgMpLck;
2233 void *pvPage;
2234 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2235 if (RT_SUCCESS(rc))
2236 {
2237 rc = SSMR3GetMem(pSSM, pvPage, GUEST_PAGE_SIZE);
2238 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2239 }
2240
2241 return rc;
2242}
2243
2244
2245/**
2246 * Loads a page (counter part to pgmR3SavePage).
2247 *
2248 * @returns VBox status code, fully bitched errors.
2249 * @param pVM The cross context VM structure.
2250 * @param pSSM The SSM handle.
2251 * @param uOldType The page type.
2252 * @param pPage The page.
2253 * @param GCPhys The page address.
2254 * @param pRam The RAM range (for error messages).
2255 */
2256static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2257{
2258 uint8_t uState;
2259 int rc = SSMR3GetU8(pSSM, &uState);
2260 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2261 if (uState == 0 /* zero */)
2262 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2263 else if (uState == 1)
2264 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2265 else
2266 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2267 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2268 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2269 rc);
2270 return VINF_SUCCESS;
2271}
2272
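/*
 * Informational sketch of the old per-page layout consumed above (derived
 * from this code, not normative):
 *
 *      uState == 0:  nothing follows, the page must be zero/ballooned.
 *      uState == 1:  GUEST_PAGE_SIZE bytes of raw page data follow.
 *
 * Shadowed ROM pages (see pgmR3LoadShadowedRomPageOld below) prepend a
 * protection byte and then store two such records: the active page first,
 * then the passive one.
 */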
2273
2274/**
2275 * Loads a shadowed ROM page.
2276 *
2277 * @returns VBox status code, errors are fully bitched.
2278 * @param pVM The cross context VM structure.
2279 * @param pSSM The saved state handle.
2280 * @param pPage The page.
2281 * @param GCPhys The page address.
2282 * @param pRam The RAM range (for error messages).
2283 */
2284static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2285{
2286 /*
2287 * Load and set the protection first, then load the two pages: the first
2288 * one is the active page, the other the passive one.
2289 */
2290 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2291 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2292
2293 uint8_t uProt;
2294 int rc = SSMR3GetU8(pSSM, &uProt);
2295 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2296 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2297 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2298 && enmProt < PGMROMPROT_END,
2299 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2300 VERR_SSM_UNEXPECTED_DATA);
2301
2302 if (pRomPage->enmProt != enmProt)
2303 {
2304 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2305 AssertLogRelRCReturn(rc, rc);
2306 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2307 }
2308
2309 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2310 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2311 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2312 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2313
2314 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2315 * used down the line (the 2nd page will be written to the first
2316 * one because of a false TLB hit, since the TLB is using GCPhys and
2317 * doesn't check the HCPhys of the desired page). */
2318 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2319 if (RT_SUCCESS(rc))
2320 {
2321 *pPageActive = *pPage;
2322 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2323 }
2324 return rc;
2325}
2326
2327/**
2328 * Loads the RAM range flags and bits for older versions of the saved state.
2329 *
2330 * @returns VBox status code.
2331 *
2332 * @param pVM The cross context VM structure.
2333 * @param pSSM The SSM handle.
2334 * @param uVersion The saved state version.
2335 */
2336static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2337{
2338 PPGM pPGM = &pVM->pgm.s;
2339
2340 /*
2341 * Ram range flags and bits.
2342 */
2343 uint32_t i = 0;
2344 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++)
2345 {
2346 /* Check the sequence number / separator. */
2347 uint32_t u32Sep;
2348 int rc = SSMR3GetU32(pSSM, &u32Sep);
2349 if (RT_FAILURE(rc))
2350 return rc;
2351 if (u32Sep == ~0U)
2352 break;
2353 if (u32Sep != i)
2354 {
2355 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2356 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2357 }
2358 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2359
2360 /* Get the range details. */
2361 RTGCPHYS GCPhys;
2362 SSMR3GetGCPhys(pSSM, &GCPhys);
2363 RTGCPHYS GCPhysLast;
2364 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2365 RTGCPHYS cb;
2366 SSMR3GetGCPhys(pSSM, &cb);
2367 uint8_t fHaveBits;
2368 rc = SSMR3GetU8(pSSM, &fHaveBits);
2369 if (RT_FAILURE(rc))
2370 return rc;
2371 if (fHaveBits & ~1)
2372 {
2373 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2374 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2375 }
2376 size_t cchDesc = 0;
2377 char szDesc[256];
2378 szDesc[0] = '\0';
2379 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2380 {
2381 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2382 if (RT_FAILURE(rc))
2383 return rc;
2384 /* Since we've modified the description strings in r45878, only compare
2385 them if the saved state is more recent. */
2386 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2387 cchDesc = strlen(szDesc);
2388 }
2389
2390 /*
2391 * Match it up with the current range.
2392 *
2393 * Note there is a hack for dealing with the high BIOS mapping
2394 * in the old saved state format, which means we might not have
2395 * a 1:1 match on success.
2396 */
2397 if ( ( GCPhys != pRam->GCPhys
2398 || GCPhysLast != pRam->GCPhysLast
2399 || cb != pRam->cb
2400 || ( cchDesc
2401 && strcmp(szDesc, pRam->pszDesc)) )
2402 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2403 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2404 || GCPhys != UINT32_C(0xfff80000)
2405 || GCPhysLast != UINT32_C(0xffffffff)
2406 || pRam->GCPhysLast != GCPhysLast
2407 || pRam->GCPhys < GCPhys
2408 || !fHaveBits)
2409 )
2410 {
2411 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2412 "State : %RGp-%RGp %RGp bytes %s %s\n",
2413 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
2414 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2415 /*
2416 * If we're loading a state for debugging purpose, don't make a fuss if
2417 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2418 */
2419 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2420 || GCPhys < 8 * _1M)
2421 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2422 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2423 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2424 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);
2425
2426 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2427 continue;
2428 }
2429
2430 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> GUEST_PAGE_SHIFT;
2431 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2432 {
2433 /*
2434 * Load the pages one by one.
2435 */
2436 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2437 {
2438 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2439 PPGMPAGE pPage = &pRam->aPages[iPage];
2440 uint8_t uOldType;
2441 rc = SSMR3GetU8(pSSM, &uOldType);
2442 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2443 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2444 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2445 else
2446 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2447 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2448 }
2449 }
2450 else
2451 {
2452 /*
2453 * Old format.
2454 */
2455
2456 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2457 The rest is generally irrelevant and wrong since the stuff has to match registrations. */
2458 uint32_t fFlags = 0;
2459 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2460 {
2461 uint16_t u16Flags;
2462 rc = SSMR3GetU16(pSSM, &u16Flags);
2463 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2464 fFlags |= u16Flags;
2465 }
2466
2467 /* Load the bits */
2468 if ( !fHaveBits
2469 && GCPhysLast < UINT32_C(0xe0000000))
2470 {
2471 /*
2472 * Dynamic chunks.
2473 */
2474 const uint32_t cPagesInChunk = (1*1024*1024) >> GUEST_PAGE_SHIFT;
2475 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2476 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2477 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2478
2479 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2480 {
2481 uint8_t fPresent;
2482 rc = SSMR3GetU8(pSSM, &fPresent);
2483 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2484 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2485 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2486 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2487
2488 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2489 {
2490 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2491 PPGMPAGE pPage = &pRam->aPages[iPage];
2492 if (fPresent)
2493 {
2494 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2495 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2496 rc = pgmR3LoadPageToDevNullOld(pSSM);
2497 else
2498 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2499 }
2500 else
2501 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2502 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2503 }
2504 }
2505 }
2506 else if (pRam->pvR3)
2507 {
2508 /*
2509 * MMIO2.
2510 */
2511 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2512 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2513 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2514 AssertLogRelMsgReturn(pRam->pvR3,
2515 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2516 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2517
2518 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
2519 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2520 }
2521 else if (GCPhysLast < UINT32_C(0xfff80000))
2522 {
2523 /*
2524 * PCI MMIO, no pages saved.
2525 */
2526 }
2527 else
2528 {
2529 /*
2530 * Load the 0xfff80000..0xffffffff BIOS range.
2531 * It starts with X reserved pages that we have to skip over since
2532 * the RAMRANGE created by the new code won't include those.
2533 */
2534 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2535 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2536 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2537 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2538 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2539 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2540 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2541
2542 /* Skip wasted reserved pages before the ROM. */
2543 while (GCPhys < pRam->GCPhys)
2544 {
2545 rc = pgmR3LoadPageToDevNullOld(pSSM);
2546 GCPhys += GUEST_PAGE_SIZE;
2547 }
2548
2549 /* Load the bios pages. */
2550 cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2551 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2552 {
2553 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2554 PPGMPAGE pPage = &pRam->aPages[iPage];
2555
2556 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2557 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage),
2558 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2559 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2560 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2561 }
2562 }
2563 }
2564 }
2565
2566 return VINF_SUCCESS;
2567}
2568
2569
2570/**
2571 * Worker for pgmR3Load and pgmR3LoadFinalLocked.
2572 *
2573 * @returns VBox status code.
2574 *
2575 * @param pVM The cross context VM structure.
2576 * @param pSSM The SSM handle.
2577 * @param uVersion The PGM saved state unit version.
2578 * @param uPass The pass number.
2579 *
2580 * @todo This needs splitting up if more record types or code twists are
2581 * added...
2582 */
2583static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2584{
2585 NOREF(uPass);
2586
2587 /*
2588 * Process page records until we hit the terminator.
2589 */
2590 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2591 PPGMRAMRANGE pRamHint = NULL;
2592 uint8_t id = UINT8_MAX;
2593 uint32_t iPage = UINT32_MAX - 10;
2594 PPGMROMRANGE pRom = NULL;
2595 PPGMREGMMIO2RANGE pRegMmio = NULL;
2596
2597 /*
2598 * We batch up pages that should be freed instead of calling GMM for
2599 * each and every one of them. Note that we'll lose the pages in most
2600 * failure paths - this should probably be addressed one day.
2601 */
2602 uint32_t cPendingPages = 0;
2603 PGMMFREEPAGESREQ pReq;
2604 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2605 AssertLogRelRCReturn(rc, rc);
2606
2607 for (;;)
2608 {
2609 /*
2610 * Get the record type and flags.
2611 */
2612 uint8_t u8;
2613 rc = SSMR3GetU8(pSSM, &u8);
2614 if (RT_FAILURE(rc))
2615 return rc;
2616 if (u8 == PGM_STATE_REC_END)
2617 {
2618 /*
2619 * Finish off any pages pending freeing.
2620 */
2621 if (cPendingPages)
2622 {
2623 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2624 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2625 AssertLogRelRCReturn(rc, rc);
2626 }
2627 GMMR3FreePagesCleanup(pReq);
2628 return VINF_SUCCESS;
2629 }
2630 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2631 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2632 {
2633 /*
2634 * RAM page.
2635 */
2636 case PGM_STATE_REC_RAM_ZERO:
2637 case PGM_STATE_REC_RAM_RAW:
2638 case PGM_STATE_REC_RAM_BALLOONED:
2639 {
2640 /*
2641 * Get the address and resolve it into a page descriptor.
2642 */
2643 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2644 GCPhys += GUEST_PAGE_SIZE;
2645 else
2646 {
2647 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2648 if (RT_FAILURE(rc))
2649 return rc;
2650 }
2651 AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2652
2653 PPGMPAGE pPage;
2654 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2655 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2656
2657 /*
2658 * Take action according to the record type.
2659 */
2660 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2661 {
2662 case PGM_STATE_REC_RAM_ZERO:
2663 {
2664 if (PGM_PAGE_IS_ZERO(pPage))
2665 break;
2666
2667 /* Ballooned pages must be unmarked (live snapshot and
2668 teleportation scenarios). */
2669 if (PGM_PAGE_IS_BALLOONED(pPage))
2670 {
2671 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2672 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2673 break;
2674 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2675 break;
2676 }
2677
2678 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2679
2680 /* If this is a ROM page, we must clear it and not try to
2681 * free it. Ditto if the VM is using RamPreAlloc (see
2682 * @bugref{6318}). */
2683 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2684 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2685#ifdef VBOX_WITH_PGM_NEM_MODE
2686 || pVM->pgm.s.fNemMode
2687#endif
2688 || pVM->pgm.s.fRamPreAlloc)
2689 {
2690 PGMPAGEMAPLOCK PgMpLck;
2691 void *pvDstPage;
2692 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2693 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2694
2695 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2696 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2697 }
2698 /* Free it only if it's not part of a previously
2699 allocated large page (no need to clear the page). */
2700 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2701 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2702 {
2703 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2704 AssertRCReturn(rc, rc);
2705 }
2706 /** @todo handle large pages (see @bugref{5545}) */
2707 break;
2708 }
2709
2710 case PGM_STATE_REC_RAM_BALLOONED:
2711 {
2712 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2713 if (PGM_PAGE_IS_BALLOONED(pPage))
2714 break;
2715
2716 /* We don't map ballooned pages in our shadow page tables; let's
2717 just free it if allocated and mark it as ballooned. See @bugref{5515}. */
2718 if (PGM_PAGE_IS_ALLOCATED(pPage))
2719 {
2720 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2721 * @bugref{5545}). */
2722 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2723 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2724 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2725
2726 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2727 AssertRCReturn(rc, rc);
2728 }
2729 Assert(PGM_PAGE_IS_ZERO(pPage));
2730 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2731 break;
2732 }
2733
2734 case PGM_STATE_REC_RAM_RAW:
2735 {
2736 PGMPAGEMAPLOCK PgMpLck;
2737 void *pvDstPage;
2738 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2739 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2740 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2741 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2742 if (RT_FAILURE(rc))
2743 return rc;
2744 break;
2745 }
2746
2747 default:
2748 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2749 }
2750 id = UINT8_MAX;
2751 break;
2752 }
2753
2754 /*
2755 * MMIO2 page.
2756 */
2757 case PGM_STATE_REC_MMIO2_RAW:
2758 case PGM_STATE_REC_MMIO2_ZERO:
2759 {
2760 /*
2761 * Get the ID + page number and resolve that into an MMIO2 page.
2762 */
2763 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2764 iPage++;
2765 else
2766 {
2767 SSMR3GetU8(pSSM, &id);
2768 rc = SSMR3GetU32(pSSM, &iPage);
2769 if (RT_FAILURE(rc))
2770 return rc;
2771 }
2772 if ( !pRegMmio
2773 || pRegMmio->idSavedState != id)
2774 {
2775 for (pRegMmio = pVM->pgm.s.pRegMmioRangesR3; pRegMmio; pRegMmio = pRegMmio->pNextR3)
2776 if (pRegMmio->idSavedState == id)
2777 break;
2778 AssertLogRelMsgReturn(pRegMmio, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2779 }
2780 AssertLogRelMsgReturn(iPage < (pRegMmio->RamRange.cb >> GUEST_PAGE_SHIFT),
2781 ("iPage=%#x cb=%RGp %s\n", iPage, pRegMmio->RamRange.cb, pRegMmio->RamRange.pszDesc),
2782 VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2783 void *pvDstPage = (uint8_t *)pRegMmio->RamRange.pvR3 + ((size_t)iPage << GUEST_PAGE_SHIFT);
2784
2785 /*
2786 * Load the page bits.
2787 */
2788 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2789 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2790 else
2791 {
2792 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2793 if (RT_FAILURE(rc))
2794 return rc;
2795 }
2796 GCPhys = NIL_RTGCPHYS;
2797 break;
2798 }
2799
2800 /*
2801 * ROM pages.
2802 */
2803 case PGM_STATE_REC_ROM_VIRGIN:
2804 case PGM_STATE_REC_ROM_SHW_RAW:
2805 case PGM_STATE_REC_ROM_SHW_ZERO:
2806 case PGM_STATE_REC_ROM_PROT:
2807 {
2808 /*
2809 * Get the ID + page number and resolve that into a ROM page descriptor.
2810 */
2811 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2812 iPage++;
2813 else
2814 {
2815 SSMR3GetU8(pSSM, &id);
2816 rc = SSMR3GetU32(pSSM, &iPage);
2817 if (RT_FAILURE(rc))
2818 return rc;
2819 }
2820 if ( !pRom
2821 || pRom->idSavedState != id)
2822 {
2823 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2824 if (pRom->idSavedState == id)
2825 break;
2826 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2827 }
2828 AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT),
2829 ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc),
2830 VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2831 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2832 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
2833
2834 /*
2835 * Get and set the protection.
2836 */
2837 uint8_t u8Prot;
2838 rc = SSMR3GetU8(pSSM, &u8Prot);
2839 if (RT_FAILURE(rc))
2840 return rc;
2841 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2842 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2843
2844 if (enmProt != pRomPage->enmProt)
2845 {
2846 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2847 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2848 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2849 GCPhys, enmProt, pRom->pszDesc);
2850 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2851 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2852 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2853 }
2854 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2855 break; /* done */
2856
2857 /*
2858 * Get the right page descriptor.
2859 */
2860 PPGMPAGE pRealPage;
2861 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2862 {
2863 case PGM_STATE_REC_ROM_VIRGIN:
2864 if (!PGMROMPROT_IS_ROM(enmProt))
2865 pRealPage = &pRomPage->Virgin;
2866 else
2867 pRealPage = NULL;
2868 break;
2869
2870 case PGM_STATE_REC_ROM_SHW_RAW:
2871 case PGM_STATE_REC_ROM_SHW_ZERO:
2872 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2873 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2874 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
2875 GCPhys, enmProt, pRom->pszDesc);
2876 if (PGMROMPROT_IS_ROM(enmProt))
2877 pRealPage = &pRomPage->Shadow;
2878 else
2879 pRealPage = NULL;
2880 break;
2881
2882 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
2883 }
2884 if (!pRealPage)
2885 {
2886 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
2887 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2888 }
2889
2890 /*
2891 * Make it writable and map it (if necessary).
2892 */
2893 void *pvDstPage = NULL;
2894 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2895 {
2896 case PGM_STATE_REC_ROM_SHW_ZERO:
2897 if ( PGM_PAGE_IS_ZERO(pRealPage)
2898 || PGM_PAGE_IS_BALLOONED(pRealPage))
2899 break;
2900 /** @todo implement zero page replacing. */
2901 RT_FALL_THRU();
2902 case PGM_STATE_REC_ROM_VIRGIN:
2903 case PGM_STATE_REC_ROM_SHW_RAW:
2904 {
2905 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
2906 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2907 break;
2908 }
2909 }
2910
2911 /*
2912 * Load the bits.
2913 */
2914 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2915 {
2916 case PGM_STATE_REC_ROM_SHW_ZERO:
2917 if (pvDstPage)
2918 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2919 break;
2920
2921 case PGM_STATE_REC_ROM_VIRGIN:
2922 case PGM_STATE_REC_ROM_SHW_RAW:
2923 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2924 if (RT_FAILURE(rc))
2925 return rc;
2926 break;
2927 }
2928 GCPhys = NIL_RTGCPHYS;
2929 break;
2930 }
2931
2932 /*
2933 * Unknown type.
2934 */
2935 default:
2936 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2937 }
2938 } /* forever */
2939}
2940
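/*
 * Illustrative sketch (not compiled) of the record stream consumed by
 * pgmR3LoadMemory above, RAM records only: each record starts with a type
 * byte; PGM_STATE_REC_FLAG_ADDR signals an explicit address, otherwise the
 * position is implicit (previous page + GUEST_PAGE_SIZE).  MMIO2 and ROM
 * records carry a range ID + page index instead of an address, and
 * PGM_STATE_REC_END terminates the stream.
 */
#if 0
    RTGCPHYS GCPhys = NIL_RTGCPHYS;
    for (;;)
    {
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        AssertRCReturn(rc, rc);
        if (u8 == PGM_STATE_REC_END)
            break;                                  /* terminator record */
        if (u8 & PGM_STATE_REC_FLAG_ADDR)
            SSMR3GetGCPhys(pSSM, &GCPhys);          /* explicit position */
        else
            GCPhys += GUEST_PAGE_SIZE;              /* implicit position */
        /* ...dispatch on u8 & ~PGM_STATE_REC_FLAG_ADDR... */
    }
#endif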
2941
2942/**
2943 * Worker for pgmR3Load.
2944 *
2945 * @returns VBox status code.
2946 *
2947 * @param pVM The cross context VM structure.
2948 * @param pSSM The SSM handle.
2949 * @param uVersion The saved state version.
2950 */
2951static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2952{
2953 PPGM pPGM = &pVM->pgm.s;
2954 int rc;
2955 uint32_t u32Sep;
2956
2957 /*
2958 * Load basic data (required / unaffected by relocation).
2959 */
2960 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2961 {
2962 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
2963 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2964 else
2965 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFieldsPreBalloon[0], NULL /*pvUser*/);
2966
2967 AssertLogRelRCReturn(rc, rc);
2968
2969 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2970 {
2971 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
2972 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
2973 else
2974 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
2975 AssertLogRelRCReturn(rc, rc);
2976 }
2977 }
2978 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2979 {
2980 AssertRelease(pVM->cCpus == 1);
2981
2982 PGMOLD pgmOld;
2983 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2984 AssertLogRelRCReturn(rc, rc);
2985
2986 PVMCPU pVCpu0 = pVM->apCpusR3[0];
2987 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2988 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2989 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2990 }
2991 else
2992 {
2993 AssertRelease(pVM->cCpus == 1);
2994
2995 SSMR3Skip(pSSM, sizeof(bool));
2996 RTGCPTR GCPtrIgn;
2997 SSMR3GetGCPtr(pSSM, &GCPtrIgn);
2998 SSMR3Skip(pSSM, sizeof(uint32_t));
2999
3000 uint32_t cbRamSizeIgnored;
3001 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3002 if (RT_FAILURE(rc))
3003 return rc;
3004 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3005 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3006
3007 uint32_t u32 = 0;
3008 SSMR3GetUInt(pSSM, &u32);
3009 pVCpu0->pgm.s.fA20Enabled = !!u32;
3010 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3011 RTUINT uGuestMode;
3012 SSMR3GetUInt(pSSM, &uGuestMode);
3013 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3014
3015 /* check separator. */
3016 rc = SSMR3GetU32(pSSM, &u32Sep);
3017 if (RT_FAILURE(rc))
3018 return rc;
3019 if (u32Sep != (uint32_t)~0)
3020 {
3021 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3022 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3023 }
3024 }
3025
3026 /*
3027 * Fix the A20 mask.
3028 */
3029 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3030 {
3031 PVMCPU pVCpu = pVM->apCpusR3[i];
3032 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3033 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3034 }
3035
3036 /*
3037 * The guest mappings - skipped now, see re-fixation in the caller.
3038 */
3039 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3040 {
3041 for (uint32_t i = 0; ; i++)
3042 {
3043 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3044 if (RT_FAILURE(rc))
3045 return rc;
3046 if (u32Sep == ~0U)
3047 break;
3048 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3049
3050 char szDesc[256];
3051 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3052 if (RT_FAILURE(rc))
3053 return rc;
3054 RTGCPTR GCPtrIgnore;
3055 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3056 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3057 if (RT_FAILURE(rc))
3058 return rc;
3059 }
3060 }
3061
3062 /*
3063 * Load the RAM contents.
3064 */
3065 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3066 {
3067 if (!pVM->pgm.s.LiveSave.fActive)
3068 {
3069 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3070 {
3071 rc = pgmR3LoadRamConfig(pVM, pSSM);
3072 if (RT_FAILURE(rc))
3073 return rc;
3074 }
3075 rc = pgmR3LoadRomRanges(pVM, pSSM);
3076 if (RT_FAILURE(rc))
3077 return rc;
3078 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3079 if (RT_FAILURE(rc))
3080 return rc;
3081 }
3082
3083 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3084 }
3085 else
3086 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3087
3088 /* Refresh balloon accounting. */
3089 if (pVM->pgm.s.cBalloonedPages)
3090 {
3091 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3092 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3093 AssertRCReturn(rc, rc);
3094 }
3095 return rc;
3096}
3097
3098
3099/**
3100 * @callback_method_impl{FNSSMINTLOADEXEC}
3101 */
3102static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3103{
3104 int rc;
3105
3106 /*
3107 * Validate version.
3108 */
3109 if ( ( uPass != SSM_PASS_FINAL
3110 && uVersion != PGM_SAVED_STATE_VERSION
3111 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3112 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3113 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3114 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3115 || ( uVersion != PGM_SAVED_STATE_VERSION
3116 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3117 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3118 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3119 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3120 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3121 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3122 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3123 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3124 )
3125 {
3126 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3127 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3128 }
3129
3130 /*
3131 * Do the loading while owning the lock because a bunch of the functions
3132 * we're using require this.
3133 */
3134 if (uPass != SSM_PASS_FINAL)
3135 {
3136 PGM_LOCK_VOID(pVM);
3137 if (uPass != 0)
3138 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3139 else
3140 {
3141 pVM->pgm.s.LiveSave.fActive = true;
3142 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3143 rc = pgmR3LoadRamConfig(pVM, pSSM);
3144 else
3145 rc = VINF_SUCCESS;
3146 if (RT_SUCCESS(rc))
3147 rc = pgmR3LoadRomRanges(pVM, pSSM);
3148 if (RT_SUCCESS(rc))
3149 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3150 if (RT_SUCCESS(rc))
3151 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3152 }
3153 PGM_UNLOCK(pVM);
3154 }
3155 else
3156 {
3157 PGM_LOCK_VOID(pVM);
3158 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3159 pVM->pgm.s.LiveSave.fActive = false;
3160 PGM_UNLOCK(pVM);
3161 if (RT_SUCCESS(rc))
3162 {
3163 /*
3164 * We require a full resync now.
3165 */
3166 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3167 {
3168 PVMCPU pVCpu = pVM->apCpusR3[i];
3169 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3170 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3171 /** @todo For guest PAE, we might get the wrong
3172 * aGCPhysGstPaePDs values now. We should use the
3173 * saved ones... Postponing this since it's nothing new
3174 * and PAE/PDPTR needs some general readjusting, see
3175 * @bugref{5880}. */
3176 }
3177
3178 pgmR3HandlerPhysicalUpdateAll(pVM);
3179
3180 /*
3181 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3182 * (Requires the CPUM state to be restored already!)
3183 */
3184 if (CPUMR3IsStateRestorePending(pVM))
3185 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3186 N_("PGM was unexpectedly restored before CPUM"));
3187
3188 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3189 {
3190 PVMCPU pVCpu = pVM->apCpusR3[i];
3191
3192 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
3193 AssertLogRelRCReturn(rc, rc);
3194
3195 /* Update the PSE, NX flags and validity masks. */
3196 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3197 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3198 }
3199 }
3200 }
3201
3202 return rc;
3203}
3204
3205
3206/**
3207 * @callback_method_impl{FNSSMINTLOADDONE}
3208 */
3209static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3210{
3211 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3212 NOREF(pSSM);
3213 return VINF_SUCCESS;
3214}
3215
3216
3217/**
3218 * Registers the saved state callbacks with SSM.
3219 *
3220 * @returns VBox status code.
3221 * @param pVM The cross context VM structure.
3222 * @param cbRam The RAM size.
3223 */
3224int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3225{
3226 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3227 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3228 NULL, pgmR3SaveExec, pgmR3SaveDone,
3229 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3230}
3231
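/*
 * Informational summary of the callback flow registered above: a live save
 * runs pgmR3LivePrep once, then pgmR3LiveExec + pgmR3LiveVote per pass until
 * the vote succeeds, followed by pgmR3SaveExec and pgmR3SaveDone in the
 * final pass.  A restore runs pgmR3LoadPrep, then pgmR3Load for each pass
 * (including the final one), and finally pgmR3LoadDone.
 */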