VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

Last change on this file was 108132, checked in by vboxsync, 8 weeks ago

VMM/PGM: Merge and deduplicate code targeting x86 & amd64 in PGM.cpp. Don't bother compiling pool stuff on arm and darwin.amd64. jiraref:VBP-1531

/* $Id: PGMSavedState.cpp 108132 2025-02-10 11:05:23Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, The Saved State Part.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmdrv.h>
#include <VBox/vmm/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/param.h>
#include <VBox/err.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mem.h>
#include <iprt/sha.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION                 14
/** Saved state data unit version before the PAE PDPE registers. */
#define PGM_SAVED_STATE_VERSION_PRE_PAE         13
/** Saved state data unit version after this includes ballooned page flags in
 *  the state (see @bugref{5515}). */
#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN  12
/** Saved state before the balloon change. */
#define PGM_SAVED_STATE_VERSION_PRE_BALLOON     11
/** Saved state data unit version used during 3.1 development, misses the RAM
 *  config. */
#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG      10
/** Saved state data unit version for 3.0 (pre teleportation). */
#define PGM_SAVED_STATE_VERSION_3_0_0           9
/** Saved state data unit version for 2.2.2 and later. */
#define PGM_SAVED_STATE_VERSION_2_2_2           8
/** Saved state data unit version for 2.2.0. */
#define PGM_SAVED_STATE_VERSION_RR_DESC         7
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6


/** @name Sparse state record types
 * @{ */
/** Zero page. No data. */
#define PGM_STATE_REC_RAM_ZERO          UINT8_C(0x00)
/** Raw page. */
#define PGM_STATE_REC_RAM_RAW           UINT8_C(0x01)
/** Raw MMIO2 page. */
#define PGM_STATE_REC_MMIO2_RAW         UINT8_C(0x02)
/** Zero MMIO2 page. */
#define PGM_STATE_REC_MMIO2_ZERO        UINT8_C(0x03)
/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
#define PGM_STATE_REC_ROM_VIRGIN        UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
#define PGM_STATE_REC_ROM_SHW_RAW       UINT8_C(0x05)
/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
#define PGM_STATE_REC_ROM_SHW_ZERO      UINT8_C(0x06)
/** ROM protection (8-bit). */
#define PGM_STATE_REC_ROM_PROT          UINT8_C(0x07)
/** Ballooned page. No data. */
#define PGM_STATE_REC_RAM_BALLOONED     UINT8_C(0x08)
/** The last record type. */
#define PGM_STATE_REC_LAST              PGM_STATE_REC_RAM_BALLOONED
/** End marker. */
#define PGM_STATE_REC_END               UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 * For RAW pages this is an RTGCPHYS. For MMIO2 and ROM pages this is an
 * 8-bit range ID and a 32-bit page index.
 */
#define PGM_STATE_REC_FLAG_ADDR         UINT8_C(0x80)
/** @} */
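
/* Illustrative note (not part of the saved state code): every record in the
 * sparse stream starts with a type byte, and PGM_STATE_REC_FLAG_ADDR in that
 * byte tells the reader whether an explicit address follows before the
 * payload.  A minimal decode sketch for RAM records, assuming an open read
 * handle pSSM and GCPhysLast/abPage provided by the caller's context:
 *
 * @code
 *      uint8_t u8Type;
 *      int rc = SSMR3GetU8(pSSM, &u8Type);
 *      AssertRCReturn(rc, rc);
 *      if (u8Type == PGM_STATE_REC_END)
 *          return VINF_SUCCESS;
 *      RTGCPHYS GCPhys;
 *      if (u8Type & PGM_STATE_REC_FLAG_ADDR)
 *          rc = SSMR3GetGCPhys(pSSM, &GCPhys);     // explicit address
 *      else
 *          GCPhys = GCPhysLast + GUEST_PAGE_SIZE;  // implicitly sequential
 *      if ((u8Type & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_RAM_RAW)
 *          rc = SSMR3GetMem(pSSM, abPage, GUEST_PAGE_SIZE);
 * @endcode
 *
 * For MMIO2 and ROM records the address takes the form of an 8-bit range ID
 * followed by a 32-bit page index instead of an RTGCPHYS, as can be seen in
 * the savers below. */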

/** The CRC-32 for a zero page. */
#define PGM_STATE_CRC32_ZERO_PAGE       UINT32_C(0xc71c0011)
/** The CRC-32 for a zero half page. */
#define PGM_STATE_CRC32_ZERO_HALF_PAGE  UINT32_C(0xf1e8ba9e)
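
/* Sanity sketch (illustrative only): per the comments above, the two
 * constants correspond to RTCrc32 over zero-filled buffers, assuming 4 KiB
 * guest pages:
 *
 * @code
 *      uint8_t abZeroPg[GUEST_PAGE_SIZE];
 *      RT_ZERO(abZeroPg);
 *      Assert(RTCrc32(abZeroPg, GUEST_PAGE_SIZE)     == PGM_STATE_CRC32_ZERO_PAGE);
 *      Assert(RTCrc32(abZeroPg, GUEST_PAGE_SIZE / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE);
 * @endcode
 */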



/** @name Old Page types used in older saved states.
 * @{ */
/** Old saved state: The usual invalid zero entry. */
#define PGMPAGETYPE_OLD_INVALID             0
/** Old saved state: RAM page. (RWX) */
#define PGMPAGETYPE_OLD_RAM                 1
/** Old saved state: MMIO2 page. (RWX) */
#define PGMPAGETYPE_OLD_MMIO2               1
/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
 *  See PGMHandlerPhysicalPageAlias(). */
#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO    2
/** Old saved state: Shadowed ROM. (RWX) */
#define PGMPAGETYPE_OLD_ROM_SHADOW          3
/** Old saved state: ROM page. (R-X) */
#define PGMPAGETYPE_OLD_ROM                 4
/** Old saved state: MMIO page. (---) */
#define PGMPAGETYPE_OLD_MMIO                5
/** @} */


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** For loading old saved states. (pre-smp) */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE         enmGuestMode;
} PGMOLD;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** PGM fields to save/load. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY_OLD(         fMappingsFixed, sizeof(bool)),
    SSMFIELD_ENTRY_OLD_GCPTR(   GCPtrMappingFixed),
    SSMFIELD_ENTRY_OLD(         cbMappingFixed, sizeof(uint32_t)),
    SSMFIELD_ENTRY(             PGM, cBalloonedPages),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFieldsPreBalloon[] =
{
    SSMFIELD_ENTRY_OLD(         fMappingsFixed, sizeof(bool)),
    SSMFIELD_ENTRY_OLD_GCPTR(   GCPtrMappingFixed),
    SSMFIELD_ENTRY_OLD(         cbMappingFixed, sizeof(uint32_t)),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(             PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(      PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(             PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[0]),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[1]),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[2]),
    SSMFIELD_ENTRY(             PGMCPU, aGCPhysGstPaePDs[3]),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
{
    SSMFIELD_ENTRY(             PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(      PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(             PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(             PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(       PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(             PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(             PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(      PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(             PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};


/**
 * Find the ROM tracking structure for the given page.
 *
 * @returns Pointer to the ROM page structure.  NULL if @a GCPhys is not
 *          covered by a ROM range (i.e. the caller didn't check that it's
 *          a ROM page).
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The address of the ROM page.
 */
static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
{
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idx];
        RTGCPHYS const     off       = GCPhys - pRomRange->GCPhys;
        if (off < pRomRange->cb)
            return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT];
    }
    return NULL;
}
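
/* Hypothetical usage sketch (illustrative, not from the source): the caller
 * is expected to have established that GCPhys lies within a ROM range, e.g.:
 *
 * @code
 *      PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
 *      AssertReturn(pRomPage, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
 * @endcode
 */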


/**
 * Prepares the ROM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRomPages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the ROM page descriptors.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom     = pVM->pgm.s.apRomRanges[idx];
        uint32_t const     cPages   = pRom->cb >> GUEST_PAGE_SHIFT;
        PPGMRAMRANGE       pRamHint = NULL;

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            pRom->aPages[iPage].LiveSave.u8Prot           = (uint8_t)PGMROMPROT_INVALID;
            pRom->aPages[iPage].LiveSave.fWrittenTo       = false;
            pRom->aPages[iPage].LiveSave.fDirty           = true;
            pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
            if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED) /* shadowed ranges track the shadow page, so no '!' here */
            {
                if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
                    pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
                                                           && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                else
                {
                    RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    PPGMPAGE pPage;
                    int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
                    AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
                    if (RT_SUCCESS(rc))
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
                    else
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
                                                               && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                }
            }
        }

        pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
            pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
    }
    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the ROM ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    Saved state handle.
 */
static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom         = pVM->pgm.s.apRomRanges[idx];
        uint8_t const      idSavedState = (uint8_t)(idx + 1);
        pRom->idSavedState = idSavedState;
        SSMR3PutU8(pSSM, idSavedState);
        SSMR3PutStrZ(pSSM, "");         /* device name */
        SSMR3PutU32(pSSM, 0);           /* device instance */
        SSMR3PutU8(pSSM, 0);            /* region */
        SSMR3PutStrZ(pSSM, pRom->pszDesc);
        SSMR3PutGCPhys(pSSM, pRom->GCPhys);
        int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
        if (RT_FAILURE(rc))
            break;
    }
    PGM_UNLOCK(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}
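
/* For reference: the per-range record written above (and consumed by
 * pgmR3LoadRomRanges below) consists of, in order, a non-zero 8-bit range ID,
 * a zero-terminated device name (always empty for ROM ranges), a 32-bit
 * instance number, an 8-bit region number, the zero-terminated description
 * string, the base guest physical address and the size (both RTGCPHYS).
 * A single UINT8_MAX byte terminates the list. */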


/**
 * Loads the ROM range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
        pVM->pgm.s.apRomRanges[idx]->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /*
             * End of ROM ranges. Check that all are accounted for.
             */
            for (uint32_t idx = 0; idx < cRomRanges; idx++)
            {
                PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
                if (pRom->idSavedState != UINT8_MAX)
                { /* likely */ }
                else if (pRom->fFlags & PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE)
                    LogRel(("PGM: The '%s' ROM was not found in the saved state, but it is marked as maybe-missing, so that's probably okay.\n",
                            pRom->pszDesc));
                else
                    AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                    ("The '%s' ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                     pRom->pszDesc));
            }
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp LB %RGp %s\n", GCPhys, cb, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        uint32_t idx;
        for (idx = 0; idx < cRomRanges; idx++)
        {
            PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
            if (   pRom->idSavedState == UINT8_MAX
                && !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (idx >= cRomRanges)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp LB %RGp by the name '%s' was not found"),
                                    GCPhys, cb, szDesc);
    } /* forever */
}


/**
 * Scan ROM pages.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3ScanRomPages(PVM pVM)
{
    /*
     * The shadow ROMs.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (pRomPage->LiveSave.fWrittenTo)
                {
                    pRomPage->LiveSave.fWrittenTo = false;
                    if (!pRomPage->LiveSave.fDirty)
                    {
                        pRomPage->LiveSave.fDirty = true;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages--;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
                    }
                    pRomPage->LiveSave.fDirtiedRecently = true;
                }
                else
                    pRomPage->LiveSave.fDirtiedRecently = false;
            }
        }
    }
    PGM_UNLOCK(pVM);
}


/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom   = pVM->pgm.s.apRomRanges[idx];
        uint32_t const     cPages = pRom->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(pVM, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int  rc = VINF_SUCCESS;
            char abPage[GUEST_PAGE_SIZE];
            if (   !PGM_PAGE_IS_ZERO(pPage)
                && !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                if (!PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM))
                    pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                else
#endif
                    rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
            }
            else
                RT_ZERO(abPage);
            PGM_UNLOCK(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state. */
            PGM_LOCK_VOID(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}


/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExec and pgmR3SaveExec.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> GUEST_PAGE_SHIFT;
            uint32_t       iPrevPage = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                             )
                         )
                    )
                {
                    uint8_t     abPage[GUEST_PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage);
                    Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                        if (PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM))
                            pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                        else
#endif
                            rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
                    }
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    PGM_UNLOCK(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    PGM_UNLOCK(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}


/**
 * Cleans up ROM pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneRomPages(PVM pVM)
{
    NOREF(pVM);
}


/**
 * Prepares the MMIO2 pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
        PPGMRAMRANGE const      pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
        uint32_t const          cPages    = pRamRange->cb >> GUEST_PAGE_SHIFT;
        PGM_UNLOCK(pVM);

        PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
                                                                                sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
        if (!paLSPages)
            return VERR_NO_MEMORY;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            /* Initialize it as a dirty zero page. */
            paLSPages[iPage].fDirty          = true;
            paLSPages[iPage].cUnchangedScans = 0;
            paLSPages[iPage].fZero           = true;
            paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
        }

        PGM_LOCK_VOID(pVM);
        pRegMmio2->paLSPages = paLSPages;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the MMIO2 ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    Saved state handle.
 */
static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2    = &pVM->pgm.s.aMmio2Ranges[idx];
        PPGMRAMRANGE const      pRamRange    = pVM->pgm.s.apMmio2RamRanges[idx];
        uint8_t const           idSavedState = (uint8_t)(idx + 1);
        pRegMmio2->idSavedState = idSavedState;
        SSMR3PutU8(pSSM, idSavedState);
        SSMR3PutStrZ(pSSM, pRegMmio2->pDevInsR3->pReg->szName);
        SSMR3PutU32(pSSM, pRegMmio2->pDevInsR3->iInstance);
        SSMR3PutU8(pSSM, pRegMmio2->iRegion);
        SSMR3PutStrZ(pSSM, pRamRange->pszDesc);
        int rc = SSMR3PutGCPhys(pSSM, pRamRange->cb);
        if (RT_FAILURE(rc))
            break;
    }
    PGM_UNLOCK(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}


/**
 * Loads the MMIO2 range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
        pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /*
             * End of MMIO2 ranges. Check that all are accounted for.
             */
            for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
                AssertLogRelMsg(pVM->pgm.s.aMmio2Ranges[idx].idSavedState != UINT8_MAX,
                                ("%s\n", pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching MMIO2 range.
         */
        uint32_t idx;
        for (idx = 0; idx < cMmio2Ranges; idx++)
        {
            PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            if (   pRegMmio2->idSavedState == UINT8_MAX
                && pRegMmio2->iRegion == iRegion
                && pRegMmio2->pDevInsR3->iInstance == uInstance
                && !strcmp(pRegMmio2->pDevInsR3->pReg->szName, szDevName))
            {
                pRegMmio2->idSavedState = id;
                break;
            }
        }
        if (idx >= cMmio2Ranges)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
                                    szDesc, szDevName, uInstance, iRegion);

        /*
         * Validate the configuration, the size of the MMIO2 region should be
         * the same.
         */
        PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
        if (cb != pRamRange->cb)
        {
            LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n", pRamRange->pszDesc, cb, pRamRange->cb));
            if (cb > pRamRange->cb) /* bad idea? */
                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
                                        pRamRange->pszDesc, cb, pRamRange->cb);
        }
    } /* forever */
}


/**
 * Scans one MMIO2 page.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pbPage  The page bits.
 * @param   pLSPage The live save tracking structure for the page.
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + GUEST_PAGE_SIZE / 2, GUEST_PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            if (   u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                && ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}
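
/* A note on the scheme above (descriptive, not from the source comments):
 * hashing the page in two halves means a modified page is usually caught
 * after CRCing only the first half, and the second half is only hashed once
 * the first matches.  A double CRC match merely means "probably unchanged",
 * which is why the final pass in pgmR3SaveMmio2Pages below double-checks
 * candidate pages against a SHA-1 digest before skipping them. */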


/**
 * Scan for MMIO2 page modifications.
 *
 * @param   pVM     The cross context VM structure.
 * @param   uPass   The pass number.
 */
static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
{
    /*
     * Since this is a bit expensive we lower the scan rate after a little while.
     */
    if (   (   (uPass & 3) != 0
            && uPass > 10)
        || uPass == SSM_PASS_FINAL)
        return;

    PGM_LOCK_VOID(pVM);                 /* paranoia */
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
        PPGMLIVESAVEMMIO2PAGE   paLSPages = pRegMmio2->paLSPages;
        uint32_t                cPages    = pVM->pgm.s.apMmio2RamRanges[idx]->cb >> GUEST_PAGE_SHIFT;
        PGM_UNLOCK(pVM);

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            uint8_t const *pbPage = &pRegMmio2->pbR3[iPage * GUEST_PAGE_SIZE];
            pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
        }

        PGM_LOCK_VOID(pVM);
    }
    PGM_UNLOCK(pVM);
}


/**
 * Save quiescent MMIO2 pages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        PGM_LOCK_VOID(pVM);
        uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
        for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++)
        {
            PPGMREGMMIO2RANGE const     pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            PPGMRAMRANGE const          pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
            PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages;
            uint32_t const              cPages    = pRamRange->cb >> GUEST_PAGE_SHIFT;
            uint8_t const              *pbPage    = pRamRange->pbR3;
            uint32_t                    iPageLast = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZero(pbPage, GUEST_PAGE_SIZE) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try to figure out whether it's a clean page; compare the SHA-1 to be really sure. */
                    if (   !paLSPages[iPage].fDirty
                        && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, GUEST_PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                }

                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        PGM_UNLOCK(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        PGM_LOCK_VOID(pVM);
        uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
        for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++)
        {
            PPGMREGMMIO2RANGE const     pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            PPGMRAMRANGE const          pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
            PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages;
            uint32_t const              cPages    = pRamRange->cb >> GUEST_PAGE_SHIFT;
            uint8_t const              *pbPage    = pRamRange->pbR3;
            uint32_t                    iPageLast = cPages;
            PGM_UNLOCK(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                /* Skip clean pages and pages which haven't quiesced. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t abPage[GUEST_PAGE_SIZE];
                if (!fZero)
                {
                    memcpy(abPage, pbPage, GUEST_PAGE_SIZE);
                    RTSha1(abPage, GUEST_PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                    pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
                iPageLast = iPage;
            }

            PGM_LOCK_VOID(pVM);
        }
        PGM_UNLOCK(pVM);
    }

    return rc;
}


/**
 * Cleans up MMIO2 pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneMmio2Pages(PVM pVM)
{
    /*
     * Free the tracking structures for the MMIO2 pages.
     * We do the freeing outside the lock in case the VM is running.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
    {
        PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
        void *pvMmio2ToFree = pRegMmio2->paLSPages;
        if (pvMmio2ToFree)
        {
            pRegMmio2->paLSPages = NULL;
            PGM_UNLOCK(pVM);
            MMR3HeapFree(pvMmio2ToFree);
            PGM_LOCK_VOID(pVM);
        }
    }
    PGM_UNLOCK(pVM);
}


/**
 * Prepares the RAM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRamPages(PVM pVM)
{
    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t idRamRange;
    do
    {
        uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
        for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
        {
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            Assert(pCur || idRamRange == 0);
            if (!pCur)
                continue;
            Assert(pCur->idRange == idRamRange);

            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
                uint32_t const cPages         = pCur->cb >> GUEST_PAGE_SHIFT;
                PGM_UNLOCK(pVM);
                PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
                                                                                    cPages * sizeof(PGMLIVESAVERAMPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                PGM_LOCK_VOID(pVM);
                if (pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen)
                {
                    PGM_UNLOCK(pVM);
                    MMR3HeapFree(paLSPages);
                    PGM_LOCK_VOID(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].cDirtied               = 0;
                    paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored        = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved             = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (   PGM_PAGE_IS_ZERO(pPage)
                                || PGM_PAGE_IS_BALLOONED(pPage))
                            {
                                paLSPages[iPage].fZero   = 1;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            else
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            paLSPages[iPage].fIgnore     = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            RT_FALL_THRU();
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                        case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
    } while (idRamRange <= RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U));
    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}


/**
 * Saves the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHole = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRam = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
    AssertRCReturn(rc, rc);

    SSMR3PutU32(pSSM, cbRamHole);
    return SSMR3PutU64(pSSM, cbRam);
}


/**
 * Loads and verifies the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHoleCfg = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRamCfg = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
    AssertRCReturn(rc, rc);

    uint32_t cbRamHoleSaved;
    SSMR3GetU32(pSSM, &cbRamHoleSaved);

    uint64_t cbRamSaved;
    rc = SSMR3GetU64(pSSM, &cbRamSaved);
    AssertRCReturn(rc, rc);

    if (   cbRamHoleCfg != cbRamHoleSaved
        || cbRamCfg != cbRamSaved)
        return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
                                cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
    return VINF_SUCCESS;
}

#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32

/**
 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
 * info with it.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 */
static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
{
    RTGCPHYS        GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
    PGMPAGEMAPLOCK  PgMpLck;
    void const     *pvPage;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
    if (RT_SUCCESS(rc))
    {
        paLSPages[iPage].u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
        pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
    }
    else
        paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
}


/**
 * Verifies the CRC-32 for a page given its raw bits.
 *
 * @param   pvPage      The page bits.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Where the check is being performed (for the assertion
 *                      message).
 */
static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        uint32_t u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
        Assert(   (   !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
                   && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
               || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
        AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
                  ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
                   pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
    }
}


/**
 * Verifies the CRC-32 for a RAM page.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Where the check is being performed (for the assertion
 *                      message).
 */
static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        RTGCPHYS        GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
        PGMPAGEMAPLOCK  PgMpLck;
        void const     *pvPage;
        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
        if (RT_SUCCESS(rc))
        {
            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
        }
    }
}

#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */

/**
 * Scan for RAM page modifications and reprotect them.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysCur = 0;
    uint32_t idxLookup;
    uint32_t cLookupEntries;
    PGM_LOCK_VOID(pVM);
    do
    {
        PGM::PGMRAMRANGEGENANDLOOKUPCOUNT const RamRangeUnion = pVM->pgm.s.RamRangeUnion;
        Assert(pVM->pgm.s.RamRangeUnion.cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
        cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
        for (idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
        {
            uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
            AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            AssertContinue(pCur);
            Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));

            if (   pCur->GCPhysLast > GCPhysCur
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t            cPages    = pCur->cb >> GUEST_PAGE_SHIFT;
                uint32_t            iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (   !fFinalPass
#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
                        && (iPage & 0x7ff) == 0x100
#endif
                        && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
                        && pVM->pgm.s.RamRangeUnion.u64Combined != RamRangeUnion.u64Combined)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                        break;          /* restart */
                    }

                    /* Skip already ignored pages. */
                    if (paLSPages[iPage].fIgnore)
                        continue;

                    if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
                    {
                        /*
                         * A RAM page.
                         */
                        switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                /** @todo Optimize this: Don't always re-enable write
                                 *        monitoring if the page is known to be very busy. */
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    AssertMsg(paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
                                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                else
                                {
                                    AssertMsg(!paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
                                    pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
                                }

                                if (!paLSPages[iPage].fDirty)
                                {
                                    pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                    if (paLSPages[iPage].fZero)
                                        pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                    pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                        paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                }

                                pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
                                                        pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
                                paLSPages[iPage].fWriteMonitored        = 1;
                                paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                paLSPages[iPage].fDirty                 = 1;
                                paLSPages[iPage].fZero                  = 0;
                                paLSPages[iPage].fShared                = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc                 = UINT32_MAX; /* invalid */
#endif
                                break;

                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                Assert(paLSPages[iPage].fWriteMonitored);
                                if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
                                {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    if (paLSPages[iPage].fWriteMonitoredJustNow)
                                        pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
                                    else
                                        pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
#endif
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                }
                                else
                                {
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
#endif
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }
                                }
                                break;

                            case PGM_PAGE_STATE_ZERO:
                            case PGM_PAGE_STATE_BALLOONED:
                                if (!paLSPages[iPage].fZero)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero   = 1;
                                    paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                                }
                                break;

                            case PGM_PAGE_STATE_SHARED:
                                if (!paLSPages[iPage].fShared)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        if (paLSPages[iPage].fZero)
                                            pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero   = 0;
                                    paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                                }
                                break;
                        }
                    }
                    else
                    {
                        /*
                         * All other types => Ignore the page.
                         */
                        Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
                        paLSPages[iPage].fIgnore = 1;
                        if (paLSPages[iPage].fWriteMonitored)
                        {
                            /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
                             *        pages! */
                            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                            {
                                AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                Assert(pVM->pgm.s.cMonitoredPages > 0);
                                pVM->pgm.s.cMonitoredPages--;
                            }
                            if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                            {
                                PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                Assert(pVM->pgm.s.cWrittenToPages > 0);
                                pVM->pgm.s.cWrittenToPages--;
                            }
                            pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
                        }

                        /** @todo the counting doesn't quite work out here. fix later? */
                        if (paLSPages[iPage].fDirty)
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        else
                        {
                            pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                            if (paLSPages[iPage].fZero)
                                pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                        }
                        pVM->pgm.s.LiveSave.cIgnoredPages++;
                    }
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break;              /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */

        /* We must use the starting lookup count here to determine whether we've
           been through all the ranges or not, since using the current count
           could lead us to skip the final range if one was unmapped while we
           yielded the lock. */
    } while (idxLookup < cLookupEntries);
    PGM_UNLOCK(pVM);
}
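
/* Summary of the live-save RAM cycle (descriptive note): each scan pass above
 * clears the written-to indicator and re-arms write monitoring on allocated
 * pages, so a page only counts as quiescent once it survives a whole pass
 * without being written to (fWriteMonitoredJustNow clear).  pgmR3SaveRamPages
 * below then writes the dirty-but-quiescent pages during the live passes, and
 * the final pass picks up whatever is still dirty. */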


/**
 * Save quiescent RAM pages.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    NOREF(fLiveSave);

    /*
     * The RAM.
     */
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
    RTGCPHYS GCPhysCur  = 0;
    uint32_t idxLookup;
    uint32_t cRamRangeLookupEntries;

    PGM_LOCK_VOID(pVM);
    do
    {
        uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
        cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
        for (idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++)
        {
            uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
            AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            AssertContinue(pCur);
            Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));

            if (   pCur->GCPhysLast > GCPhysCur
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t            cPages    = pCur->cb >> GUEST_PAGE_SHIFT;
                uint32_t            iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (   uPass != SSM_PASS_FINAL
                        && (iPage & 0x7ff) == 0x100
                        && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
                        && pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                        break;          /* restart */
                    }

                    PPGMPAGE pCurPage = &pCur->aPages[iPage];

                    /*
                     * Only save pages that haven't changed since last scan and are dirty.
                     */
                    if (   uPass != SSM_PASS_FINAL
                        && paLSPages)
                    {
                        if (!paLSPages[iPage].fDirty)
                            continue;
                        if (paLSPages[iPage].fWriteMonitoredJustNow)
                            continue;
                        if (paLSPages[iPage].fIgnore)
                            continue;
                        if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
                            continue;
                        if (    PGM_PAGE_GET_STATE(pCurPage)
                            !=  (  paLSPages[iPage].fZero
                                 ? PGM_PAGE_STATE_ZERO
                                 : paLSPages[iPage].fShared
                                 ? PGM_PAGE_STATE_SHARED
                                 : PGM_PAGE_STATE_WRITE_MONITORED))
                            continue;
                        if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
                            continue;
                    }
                    else
                    {
                        if (   paLSPages
                            && !paLSPages[iPage].fDirty
                            && !paLSPages[iPage].fIgnore)
                        {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
                                pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
#endif
                            continue;
                        }
                        if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
                            continue;
                    }

                    /*
                     * Do the saving outside the PGM critsect since SSM may block on I/O.
                     */
                    int         rc;
                    RTGCPHYS    GCPhys     = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    bool        fZero      = PGM_PAGE_IS_ZERO(pCurPage);
                    bool        fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
                    bool        fSkipped   = false;

                    if (!fZero && !fBallooned)
                    {
                        /*
                         * Copy the page and then save it outside the lock (since any
                         * SSM call may block).
                         */
                        uint8_t         abPage[GUEST_PAGE_SIZE];
                        PGMPAGEMAPLOCK  PgMpLck;
                        void const     *pvPage;
                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
                        if (RT_SUCCESS(rc))
                        {
                            memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            if (paLSPages)
                                pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
#endif
                            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
                        }
                        PGM_UNLOCK(pVM);
                        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1743
1744                    /* Try to save some memory when restoring. */
1745                    if (!ASMMemIsZero(abPage, GUEST_PAGE_SIZE)) /* use the copy; the pvPage mapping was released above */
1746 {
1747 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1748 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1749 else
1750 {
1751 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1752 SSMR3PutGCPhys(pSSM, GCPhys);
1753 }
1754 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
1755 }
1756 else
1757 {
1758 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1759 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1760 else
1761 {
1762 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1763 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1764 }
1765 }
1766 }
1767 else
1768 {
1769 /*
1770 * Dirty zero or ballooned page.
1771 */
1772#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1773 if (paLSPages)
1774 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1775#endif
1776 PGM_UNLOCK(pVM);
1777
1778 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1779 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1780 rc = SSMR3PutU8(pSSM, u8RecType);
1781 else
1782 {
1783 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1784 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1785 }
1786 }
1787 if (RT_FAILURE(rc))
1788 return rc;
1789
1790 PGM_LOCK_VOID(pVM);
1791 if (!fSkipped)
1792 GCPhysLast = GCPhys;
1793 if (paLSPages)
1794 {
1795 paLSPages[iPage].fDirty = 0;
1796 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1797 if (fZero)
1798 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1799 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1800 pVM->pgm.s.LiveSave.cSavedPages++;
1801 }
1802 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration)
1803 {
1804 GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK;
1805 break; /* restart */
1806 }
1807
1808 } /* for each page in range */
1809
1810 if (GCPhysCur != 0)
1811 break; /* Yield + ramrange change */
1812 GCPhysCur = pCur->GCPhysLast;
1813 }
1814 } /* for each range */
1815
1816 /* We must use the starting lookup count here to determine whether we've
1817 been thru all or not, since using the current count could lead us to
1818       skip the final range if one was unmapped while we yielded the lock. */
1819 } while (idxLookup < cRamRangeLookupEntries);
1820
1821 PGM_UNLOCK(pVM);
1822
1823 return VINF_SUCCESS;
1824}
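
/*
 * A minimal decoding sketch for the RAM record stream produced above.  The
 * record constants and SSMR3Get* calls are the ones used elsewhere in this
 * file; the function name is an illustrative assumption, and a real stream
 * also interleaves MMIO2 and ROM records (see pgmR3LoadMemory below), which
 * this sketch does not handle.  The first record always carries
 * PGM_STATE_REC_FLAG_ADDR; later records omit the address when the page
 * immediately follows the previous one.
 */
#if 0 /* illustrative only */
static int pgmR3SketchDecodeRamRecords(PSSMHANDLE pSSM)
{
    RTGCPHYS GCPhys = NIL_RTGCPHYS;
    for (;;)
    {
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
            return VINF_SUCCESS;

        /* An explicit address is only present when the flag is set;
           otherwise the next consecutive page is implied. */
        if (u8 & PGM_STATE_REC_FLAG_ADDR)
            rc = SSMR3GetGCPhys(pSSM, &GCPhys);
        else
            GCPhys += GUEST_PAGE_SIZE;
        if (RT_FAILURE(rc))
            return rc;

        uint8_t abPage[GUEST_PAGE_SIZE];
        switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
        {
            case PGM_STATE_REC_RAM_RAW:       /* GUEST_PAGE_SIZE bytes of payload */
                rc = SSMR3GetMem(pSSM, abPage, sizeof(abPage));
                break;
            case PGM_STATE_REC_RAM_ZERO:      /* no payload; page is all zeros */
            case PGM_STATE_REC_RAM_BALLOONED: /* no payload */
                break;
            default:
                rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
                break;
        }
        if (RT_FAILURE(rc))
            return rc;
    }
}
#endif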
1825
1826
1827/**
1828 * Cleans up RAM pages after a live save.
1829 *
1830 * @param pVM The cross context VM structure.
1831 */
1832static void pgmR3DoneRamPages(PVM pVM)
1833{
1834 /*
1835 * Free the tracking arrays and disable write monitoring.
1836 *
1837 * Play nice with the PGM lock in case we're called while the VM is still
1838 * running. This means we have to delay the freeing since we wish to use
1839 * paLSPages as an indicator of which RAM ranges we need to scan for
1840 * write monitored pages.
1841 */
1842 void *pvToFree = NULL;
1843 uint32_t cMonitoredPages = 0;
1844 uint32_t idRamRangeMax;
1845 uint32_t idRamRange;
1846 PGM_LOCK_VOID(pVM);
1847 do
1848 {
1849 idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
1850 for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
1851 {
1852 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
1853 Assert(pCur || idRamRange == 0);
1854 if (!pCur) continue;
1855 Assert(pCur->idRange == idRamRange);
1856
1857 if (pCur->paLSPages)
1858 {
1859 if (pvToFree)
1860 {
1861 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
1862 PGM_UNLOCK(pVM);
1863 MMR3HeapFree(pvToFree);
1864 pvToFree = NULL;
1865 PGM_LOCK_VOID(pVM);
1866 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration)
1867 break; /* start over again. */
1868 }
1869
1870 pvToFree = pCur->paLSPages;
1871 pCur->paLSPages = NULL;
1872
1873 uint32_t iPage = pCur->cb >> GUEST_PAGE_SHIFT;
1874 while (iPage--)
1875 {
1876 PPGMPAGE pPage = &pCur->aPages[iPage];
1877 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1878 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1879 {
1880 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1881 cMonitoredPages++;
1882 }
1883 }
1884 }
1885 }
1886 } while (idRamRange <= idRamRangeMax);
1887
1888 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1889 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1890 pVM->pgm.s.cMonitoredPages = 0;
1891 else
1892 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1893
1894 PGM_UNLOCK(pVM);
1895
1896 MMR3HeapFree(pvToFree);
1897 pvToFree = NULL;
1898}
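
/*
 * The lock juggling above follows a common PGM idiom: capture the RAM range
 * generation ID, drop the lock around the expensive call, then re-take the
 * lock and restart the scan if the layout changed in between.  Reduced to its
 * core (sketch; doExpensiveWork and restartScan are illustrative
 * placeholders, not real APIs):
 */
#if 0 /* illustrative only */
    uint32_t const idGen = pVM->pgm.s.RamRangeUnion.idGeneration;
    PGM_UNLOCK(pVM);
    doExpensiveWork();                   /* e.g. the MMR3HeapFree() above */
    PGM_LOCK_VOID(pVM);
    if (idGen != pVM->pgm.s.RamRangeUnion.idGeneration)
        restartScan();                   /* ranges changed while unlocked */
#endif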
1899
1900
1901/**
1902 * @callback_method_impl{FNSSMINTLIVEEXEC}
1903 */
1904static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1905{
1906 int rc;
1907
1908 /*
1909 * Save the MMIO2 and ROM range IDs in pass 0.
1910 */
1911 if (uPass == 0)
1912 {
1913 rc = pgmR3SaveRamConfig(pVM, pSSM);
1914 if (RT_FAILURE(rc))
1915 return rc;
1916 rc = pgmR3SaveRomRanges(pVM, pSSM);
1917 if (RT_FAILURE(rc))
1918 return rc;
1919 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1920 if (RT_FAILURE(rc))
1921 return rc;
1922 }
1923 /*
1924 * Reset the page-per-second estimate to avoid inflation by the initial
1925 * load of zero pages. pgmR3LiveVote ASSUMES this is done at pass 7.
1926 */
1927 else if (uPass == 7)
1928 {
1929 pVM->pgm.s.LiveSave.cSavedPages = 0;
1930 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
1931 }
1932
1933 /*
1934 * Do the scanning.
1935 */
1936 pgmR3ScanRomPages(pVM);
1937 pgmR3ScanMmio2Pages(pVM, uPass);
1938 pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
1939#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1940 pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */
1941#endif
1942
1943 /*
1944 * Save the pages.
1945 */
1946 if (uPass == 0)
1947 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1948 else
1949 rc = VINF_SUCCESS;
1950 if (RT_SUCCESS(rc))
1951 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1952 if (RT_SUCCESS(rc))
1953 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, uPass);
1954 if (RT_SUCCESS(rc))
1955 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1956 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1957
1958 return rc;
1959}
1960
1961
1962/**
1963 * @callback_method_impl{FNSSMINTLIVEVOTE}
1964 */
1965static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1966{
1967 /*
1968 * Update and calculate parameters used in the decision making.
1969 */
1970 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1971
1972 /* update history. */
1973 PGM_LOCK_VOID(pVM);
1974 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1975 PGM_UNLOCK(pVM);
1976 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1977 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1978 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1979 + cWrittenToPages;
1980 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1981 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1982 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1983
1984 /* calc shortterm average (4 passes). */
1985 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1986 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1987 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1988 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1989 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1990 uint32_t const cDirtyPagesShort = cTotal / 4;
1991 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1992
1993 /* calc longterm average. */
1994 cTotal = 0;
1995 if (uPass < cHistoryEntries)
1996 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1997 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1998 else
1999 for (i = 0; i < cHistoryEntries; i++)
2000 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
2001 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
2002 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
2003
2004 /* estimate the speed */
2005 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
2006 uint32_t cPagesPerSecond = (uint32_t)( (long double)pVM->pgm.s.LiveSave.cSavedPages
2007 / ((long double)cNsElapsed / 1000000000.0) );
2008 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
2009
2010 /*
2011     * Try to make a decision.
2012 */
2013 if ( cDirtyPagesShort <= cDirtyPagesLong
2014 && ( cDirtyNow <= cDirtyPagesShort
2015 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
2016 )
2017 )
2018 {
2019 if (uPass > 10)
2020 {
2021 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
2022 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
2023 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
2024 if (cMsMaxDowntime < 32)
2025 cMsMaxDowntime = 32;
2026 if ( ( cMsLeftLong <= cMsMaxDowntime
2027 && cMsLeftShort < cMsMaxDowntime)
2028 || cMsLeftShort < cMsMaxDowntime / 2
2029 )
2030 {
2031 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
2032 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
2033 return VINF_SUCCESS;
2034 }
2035 }
2036 else
2037 {
2038 if ( ( cDirtyPagesShort <= 128
2039 && cDirtyPagesLong <= 1024)
2040 || cDirtyPagesLong <= 256
2041 )
2042 {
2043 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
2044 return VINF_SUCCESS;
2045 }
2046 }
2047 }
2048
2049 /*
2050 * Come up with a completion percentage. Currently this is a simple
2051 * dirty page (long term) vs. total pages ratio + some pass trickery.
2052 */
2053    unsigned uPctDirty = (unsigned)(  (long double)cDirtyPagesLong * 100.0
2054                                    / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
2055 if (uPctDirty <= 100)
2056 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
2057 else
2058 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
2059 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
2060
2061 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
2062}
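
/*
 * Worked example of the downtime estimate above (numbers are illustrative):
 * with cPagesPerSecond = 20000 and cDirtyPagesShort = 500,
 *     cMsLeftShort = 500 / 20000 * 1000 = 25 ms,
 * which is below the 32 ms floor applied to cMsMaxDowntime, so once
 * uPass > 10 and the long-term estimate agrees the vote returns VINF_SUCCESS.
 */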
2063
2064
2065/**
2066 * @callback_method_impl{FNSSMINTLIVEPREP}
2067 *
2068 * This will attempt to allocate and initialize the tracking structures. It
2069 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
2070 * pgmR3SaveDone will do the cleanups.
2071 */
2072static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
2073{
2074 /*
2075 * Indicate that we will be using the write monitoring.
2076 */
2077 PGM_LOCK_VOID(pVM);
2078 /** @todo find a way of mediating this when more users are added. */
2079 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
2080 {
2081 PGM_UNLOCK(pVM);
2082 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
2083 }
2084 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
2085 PGM_UNLOCK(pVM);
2086
2087 /*
2088 * Initialize the statistics.
2089 */
2090 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2091 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2092 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2093 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2094 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2095 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2096 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2097 pVM->pgm.s.LiveSave.fActive = true;
2098 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2099 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2100 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2101 pVM->pgm.s.LiveSave.cSavedPages = 0;
2102 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2103 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2104
2105 /*
2106 * Per page type.
2107 */
2108 int rc = pgmR3PrepRomPages(pVM);
2109 if (RT_SUCCESS(rc))
2110 rc = pgmR3PrepMmio2Pages(pVM);
2111 if (RT_SUCCESS(rc))
2112 rc = pgmR3PrepRamPages(pVM);
2113
2114 NOREF(pSSM);
2115 return rc;
2116}
2117
2118
2119/**
2120 * @callback_method_impl{FNSSMINTSAVEEXEC}
2121 */
2122static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2123{
2124 PPGM pPGM = &pVM->pgm.s;
2125
2126 /*
2127 * Lock PGM and set the no-more-writes indicator.
2128 */
2129 PGM_LOCK_VOID(pVM);
2130 pVM->pgm.s.fNoMorePhysWrites = true;
2131
2132 /*
2133 * Save basic data (required / unaffected by relocation).
2134 */
2135 int rc = SSMR3PutStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2136
2137 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
2138 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2139
2140 /*
2141 * Save the (remainder of the) memory.
2142 */
2143 if (RT_SUCCESS(rc))
2144 {
2145 if (pVM->pgm.s.LiveSave.fActive)
2146 {
2147 pgmR3ScanRomPages(pVM);
2148 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2149 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2150
2151 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2152 if (RT_SUCCESS(rc))
2153 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2154 if (RT_SUCCESS(rc))
2155 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2156 }
2157 else
2158 {
2159 rc = pgmR3SaveRamConfig(pVM, pSSM);
2160 if (RT_SUCCESS(rc))
2161 rc = pgmR3SaveRomRanges(pVM, pSSM);
2162 if (RT_SUCCESS(rc))
2163 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2164 if (RT_SUCCESS(rc))
2165 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2166 if (RT_SUCCESS(rc))
2167 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2168 if (RT_SUCCESS(rc))
2169 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2170 if (RT_SUCCESS(rc))
2171 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2172 }
2173        SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
2174 }
2175
2176 PGM_UNLOCK(pVM);
2177 return rc;
2178}
2179
2180
2181/**
2182 * @callback_method_impl{FNSSMINTSAVEDONE}
2183 */
2184static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2185{
2186 /*
2187 * Do per page type cleanups first.
2188 */
2189 if (pVM->pgm.s.LiveSave.fActive)
2190 {
2191 pgmR3DoneRomPages(pVM);
2192 pgmR3DoneMmio2Pages(pVM);
2193 pgmR3DoneRamPages(pVM);
2194 }
2195
2196 /*
2197 * Clear the live save indicator and disengage write monitoring.
2198 */
2199 PGM_LOCK_VOID(pVM);
2200 pVM->pgm.s.LiveSave.fActive = false;
2201 /** @todo this is blindly assuming that we're the only user of write
2202 * monitoring. Fix this when more users are added. */
2203 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2204 PGM_UNLOCK(pVM);
2205
2206 NOREF(pSSM);
2207 return VINF_SUCCESS;
2208}
2209
2210
2211/**
2212 * @callback_method_impl{FNSSMINTLOADPREP}
2213 */
2214static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2215{
2216 /*
2217 * Call the reset function to make sure all the memory is cleared.
2218 */
2219 PGMR3Reset(pVM);
2220 pVM->pgm.s.LiveSave.fActive = false;
2221 NOREF(pSSM);
2222 return VINF_SUCCESS;
2223}
2224
2225
2226/**
2227 * Load an ignored page.
2228 *
2229 * @returns VBox status code.
2230 * @param pSSM The saved state handle.
2231 */
2232static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2233{
2234 uint8_t abPage[GUEST_PAGE_SIZE];
2235 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2236}
2237
2238
2239/**
2240 * Compares a page with an old save type value.
2241 *
2242 * @returns true if equal, false if not.
2243 * @param pPage The page to compare.
2244 * @param uOldType The old type value from the saved state.
2245 */
2246DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2247{
2248 uint8_t uOldPageType;
2249 switch (PGM_PAGE_GET_TYPE(pPage))
2250 {
2251 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2252 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2253 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2254 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2255 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2256 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2257 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2258 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2259 default:
2260 AssertFailed();
2261 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2262 break;
2263 }
2264 return uOldPageType == uOldType;
2265}
2266
2267
2268/**
2269 * Loads a page without any bits in the saved state, i.e. making sure it's
2270 * really zero.
2271 *
2272 * @returns VBox status code.
2273 * @param pVM The cross context VM structure.
2274 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2275 * state).
2276 * @param pPage The guest page tracking structure.
2277 * @param GCPhys The page address.
2278 * @param pRam The ram range (logging).
2279 */
2280static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2281{
2282 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2283 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2284 return VERR_SSM_UNEXPECTED_DATA;
2285
2286 /* I think this should be sufficient. */
2287 if ( !PGM_PAGE_IS_ZERO(pPage)
2288 && !PGM_PAGE_IS_BALLOONED(pPage))
2289 return VERR_SSM_UNEXPECTED_DATA;
2290
2291 NOREF(pVM);
2292 NOREF(GCPhys);
2293 NOREF(pRam);
2294 return VINF_SUCCESS;
2295}
2296
2297
2298/**
2299 * Loads a page from the saved state.
2300 *
2301 * @returns VBox status code.
2302 * @param pVM The cross context VM structure.
2303 * @param pSSM The SSM handle.
2304 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2305 * state).
2306 * @param pPage The guest page tracking structure.
2307 * @param GCPhys The page address.
2308 * @param pRam The ram range (logging).
2309 */
2310static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2311{
2312 /*
2313 * Match up the type, dealing with MMIO2 aliases (dropped).
2314 */
2315 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2316 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2317                           /* kludge for the expanded PXE bios (r67885) - @bugref{5687}: */
2318 || ( uOldType == PGMPAGETYPE_OLD_RAM
2319 && GCPhys >= 0xed000
2320 && GCPhys <= 0xeffff
2321 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2322 ,
2323 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2324 VERR_SSM_UNEXPECTED_DATA);
2325
2326 /*
2327 * Load the page.
2328 */
2329 PGMPAGEMAPLOCK PgMpLck;
2330 void *pvPage;
2331 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2332 if (RT_SUCCESS(rc))
2333 {
2334 rc = SSMR3GetMem(pSSM, pvPage, GUEST_PAGE_SIZE);
2335 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2336 }
2337
2338 return rc;
2339}
2340
2341
2342/**
2343 * Loads a page (counter part to pgmR3SavePage).
2344 *
2345 * @returns VBox status code, fully bitched errors.
2346 * @param pVM The cross context VM structure.
2347 * @param pSSM The SSM handle.
2348 * @param uOldType The page type.
2349 * @param pPage The page.
2350 * @param GCPhys The page address.
2351 * @param pRam The RAM range (for error messages).
2352 */
2353static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2354{
2355 uint8_t uState;
2356 int rc = SSMR3GetU8(pSSM, &uState);
2357 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2358 if (uState == 0 /* zero */)
2359 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2360 else if (uState == 1)
2361 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2362 else
2363 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2364 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2365 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2366 rc);
2367 return VINF_SUCCESS;
2368}
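
/*
 * The old per-page record consumed above is, in effect (layout sketch, field
 * names illustrative):
 *
 *     uint8_t uState;                  -- 0 = zero page, no payload
 *                                      -- 1 = raw page, followed by
 *     uint8_t abBits[GUEST_PAGE_SIZE]; --     the page contents
 */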
2369
2370
2371/**
2372 * Loads a shadowed ROM page.
2373 *
2374 * @returns VBox status code, errors are fully bitched.
2375 * @param pVM The cross context VM structure.
2376 * @param pSSM The saved state handle.
2377 * @param pPage The page.
2378 * @param GCPhys The page address.
2379 * @param pRam The RAM range (for error messages).
2380 */
2381static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2382{
2383 /*
2384 * Load and set the protection first, then load the two pages, the first
2385 * one is the active the other is the passive.
2386 */
2387 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2388 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2389
2390 uint8_t uProt;
2391 int rc = SSMR3GetU8(pSSM, &uProt);
2392 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2393 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2394 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2395 && enmProt < PGMROMPROT_END,
2396 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2397 VERR_SSM_UNEXPECTED_DATA);
2398
2399 if (pRomPage->enmProt != enmProt)
2400 {
2401 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2402 AssertLogRelRCReturn(rc, rc);
2403 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2404 }
2405
2406 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2407 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2408 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2409 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2410
2411 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2412 *        used down the line (the 2nd page will be written to the first
2413 * one because of a false TLB hit since the TLB is using GCPhys and
2414 * doesn't check the HCPhys of the desired page). */
2415 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2416 if (RT_SUCCESS(rc))
2417 {
2418 *pPageActive = *pPage;
2419 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2420 }
2421 return rc;
2422}
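
/*
 * Old shadowed-ROM record layout, as consumed above (sketch):
 *
 *     uint8_t uProt;   -- PGMROMPROT value, applied before the pages
 *     <page record>    -- the active page (Virgin when the protection says
 *                         real ROM is mapped, otherwise Shadow)
 *     <page record>    -- the passive page
 */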
2423
2424
2425/**
2426 * Ram range flags and bits for older versions of the saved state.
2427 *
2428 * @returns VBox status code.
2429 *
2430 * @param pVM The cross context VM structure.
2431 * @param pSSM The SSM handle.
2432 * @param uVersion The saved state version.
2433 */
2434static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2435{
2436 /*
2437 * Ram range flags and bits.
2438 */
2439 uint32_t iSeqNo = 0;
2440 uint32_t const cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries,
2441 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2442 for (uint32_t idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++)
2443 {
2444 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2445 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2446 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2447 AssertContinue(pRam);
2448
2449 /* Check the sequence number / separator. */
2450 uint32_t u32Sep;
2451 int rc = SSMR3GetU32(pSSM, &u32Sep);
2452 if (RT_FAILURE(rc))
2453 return rc;
2454 if (u32Sep == ~0U)
2455 break;
2456 if (u32Sep != iSeqNo)
2457 {
2458 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2459 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2460 }
2461 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2462
2463 /* Get the range details. */
2464 RTGCPHYS GCPhys;
2465 SSMR3GetGCPhys(pSSM, &GCPhys);
2466 RTGCPHYS GCPhysLast;
2467 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2468 RTGCPHYS cb;
2469 SSMR3GetGCPhys(pSSM, &cb);
2470 uint8_t fHaveBits;
2471 rc = SSMR3GetU8(pSSM, &fHaveBits);
2472 if (RT_FAILURE(rc))
2473 return rc;
2474 if (fHaveBits & ~1)
2475 {
2476 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2477 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2478 }
2479 size_t cchDesc = 0;
2480 char szDesc[256];
2481 szDesc[0] = '\0';
2482 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2483 {
2484 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2485 if (RT_FAILURE(rc))
2486 return rc;
2487 /* Since we've modified the description strings in r45878, only compare
2488 them if the saved state is more recent. */
2489 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2490 cchDesc = strlen(szDesc);
2491 }
2492
2493 /*
2494 * Match it up with the current range.
2495 *
2496 * Note there is a hack for dealing with the high BIOS mapping
2497 * in the old saved state format, this means we might not have
2498 * a 1:1 match on success.
2499 */
2500 if ( ( GCPhys != pRam->GCPhys
2501 || GCPhysLast != pRam->GCPhysLast
2502 || cb != pRam->cb
2503 || ( cchDesc
2504 && strcmp(szDesc, pRam->pszDesc)) )
2505 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2506 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2507 || GCPhys != UINT32_C(0xfff80000)
2508 || GCPhysLast != UINT32_C(0xffffffff)
2509 || pRam->GCPhysLast != GCPhysLast
2510 || pRam->GCPhys < GCPhys
2511 || !fHaveBits)
2512 )
2513 {
2514 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2515 "State : %RGp-%RGp %RGp bytes %s %s\n",
2516 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc,
2517 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2518 /*
2519 * If we're loading a state for debugging purpose, don't make a fuss if
2520 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2521 */
2522 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2523 || GCPhys < 8 * _1M)
2524 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2525 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2526 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2527 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc);
2528
2529 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2530 iSeqNo++;
2531 continue;
2532 }
2533
2534 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> GUEST_PAGE_SHIFT;
2535 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2536 {
2537 /*
2538 * Load the pages one by one.
2539 */
2540 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2541 {
2542 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2543 PPGMPAGE pPage = &pRam->aPages[iPage];
2544 uint8_t uOldType;
2545 rc = SSMR3GetU8(pSSM, &uOldType);
2546 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2547 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2548 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2549 else
2550 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2551 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2552 }
2553 }
2554 else
2555 {
2556 /*
2557 * Old format.
2558 */
2559
2560 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2561               The rest is generally irrelevant and wrong since the stuff has to match registrations. */
2562 uint32_t fFlags = 0;
2563 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2564 {
2565 uint16_t u16Flags;
2566 rc = SSMR3GetU16(pSSM, &u16Flags);
2567 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2568 fFlags |= u16Flags;
2569 }
2570
2571 /* Load the bits */
2572 if ( !fHaveBits
2573 && GCPhysLast < UINT32_C(0xe0000000))
2574 {
2575 /*
2576 * Dynamic chunks.
2577 */
2578 const uint32_t cPagesInChunk = (1*1024*1024) >> GUEST_PAGE_SHIFT;
2579 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2580 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2581 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2582
2583 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2584 {
2585 uint8_t fPresent;
2586 rc = SSMR3GetU8(pSSM, &fPresent);
2587 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2588 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2589 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2590 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2591
2592 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2593 {
2594 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2595 PPGMPAGE pPage = &pRam->aPages[iPage];
2596 if (fPresent)
2597 {
2598 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2599 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2600 rc = pgmR3LoadPageToDevNullOld(pSSM);
2601 else
2602 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2603 }
2604 else
2605 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2606 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2607 }
2608 }
2609 }
2610 else if (pRam->pbR3)
2611 {
2612 /*
2613 * MMIO2.
2614 */
2615 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2616 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2617 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2618 AssertLogRelMsgReturn(pRam->pbR3,
2619 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2620 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2621
2622 rc = SSMR3GetMem(pSSM, pRam->pbR3, pRam->cb);
2623 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2624 }
2625 else if (GCPhysLast < UINT32_C(0xfff80000))
2626 {
2627 /*
2628 * PCI MMIO, no pages saved.
2629 */
2630 }
2631 else
2632 {
2633 /*
2634 * Load the 0xfff80000..0xffffffff BIOS range.
2635 * It starts with X reserved pages that we have to skip over since
2636                 * the RAMRANGE created by the new code won't include those.
2637 */
2638 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2639 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2640 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2641 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2642 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2643 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2644 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2645
2646 /* Skip wasted reserved pages before the ROM. */
2647 while (GCPhys < pRam->GCPhys)
2648 {
2649 rc = pgmR3LoadPageToDevNullOld(pSSM);
2650 AssertLogRelRCReturn(rc, rc);
2651 GCPhys += GUEST_PAGE_SIZE;
2652 }
2653
2654 /* Load the bios pages. */
2655 cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2656 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2657 {
2658 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2659 PPGMPAGE pPage = &pRam->aPages[iPage];
2660
2661 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2662                                          ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage),
2663 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2664 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2665 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2666 }
2667 }
2668 }
2669
2670 iSeqNo++;
2671 }
2672
2673 return VINF_SUCCESS;
2674}
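
/*
 * Old-format RAM range header, as parsed by the loop above (layout sketch):
 *
 *     uint32_t u32Sep;      -- sequence number; ~0U terminates the list
 *     RTGCPHYS GCPhys;      -- range start
 *     RTGCPHYS GCPhysLast;  -- range end (inclusive)
 *     RTGCPHYS cb;          -- range size
 *     uint8_t  fHaveBits;   -- 0 or 1
 *     char     szDesc[];    -- zero-terminated, only present from version
 *                              PGM_SAVED_STATE_VERSION_RR_DESC onwards
 */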
2675
2676
2677/**
2678 * Worker for pgmR3Load and pgmR3LoadLocked.
2679 *
2680 * @returns VBox status code.
2681 *
2682 * @param pVM The cross context VM structure.
2683 * @param pSSM The SSM handle.
2684 * @param uVersion The PGM saved state unit version.
2685 * @param uPass The pass number.
2686 *
2687 * @todo This needs splitting up if more record types or code twists are
2688 * added...
2689 */
2690static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2691{
2692 NOREF(uPass);
2693
2694 /*
2695 * Process page records until we hit the terminator.
2696 */
2697 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2698 PPGMRAMRANGE pRamHint = NULL;
2699 uint8_t id = UINT8_MAX;
2700 uint32_t iPage = UINT32_MAX - 10;
2701 PPGMROMRANGE pRom = NULL;
2702 PPGMREGMMIO2RANGE pRegMmio2 = NULL;
2703 PPGMRAMRANGE pMmio2RamRange = NULL;
2704
2705 /*
2706 * We batch up pages that should be freed instead of calling GMM for
2707 * each and every one of them. Note that we'll lose the pages in most
2708 * failure paths - this should probably be addressed one day.
2709 */
2710 uint32_t cPendingPages = 0;
2711 PGMMFREEPAGESREQ pReq;
2712 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2713 AssertLogRelRCReturn(rc, rc);
2714
2715 for (;;)
2716 {
2717 /*
2718 * Get the record type and flags.
2719 */
2720 uint8_t u8;
2721 rc = SSMR3GetU8(pSSM, &u8);
2722 if (RT_FAILURE(rc))
2723 return rc;
2724 if (u8 == PGM_STATE_REC_END)
2725 {
2726 /*
2727 * Finish off any pages pending freeing.
2728 */
2729 if (cPendingPages)
2730 {
2731 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2732 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2733 AssertLogRelRCReturn(rc, rc);
2734 }
2735 GMMR3FreePagesCleanup(pReq);
2736 return VINF_SUCCESS;
2737 }
2738 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2739 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2740 {
2741 /*
2742 * RAM page.
2743 */
2744 case PGM_STATE_REC_RAM_ZERO:
2745 case PGM_STATE_REC_RAM_RAW:
2746 case PGM_STATE_REC_RAM_BALLOONED:
2747 {
2748 /*
2749 * Get the address and resolve it into a page descriptor.
2750 */
2751 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2752 GCPhys += GUEST_PAGE_SIZE;
2753 else
2754 {
2755 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2756 if (RT_FAILURE(rc))
2757 return rc;
2758 }
2759 AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2760
2761 PPGMPAGE pPage;
2762 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2763 if (RT_SUCCESS(rc))
2764 { /* likely */ }
2765 else if ( rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
2766 && GCPhys < _1M
2767 && GCPhys >= 640U*_1K
2768 && (u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_RAM_ZERO)
2769 {
2770 rc = VINF_SUCCESS; /* We've kicked out unused pages between 640K and 1MB, but older states may include them. */
2771 id = UINT8_MAX;
2772 break;
2773 }
2774 else
2775 AssertLogRelMsgFailedReturn(("rc=%Rrc %RGp u8=%#x\n", rc, GCPhys, u8), rc);
2776
2777 /*
2778 * Take action according to the record type.
2779 */
2780 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2781 {
2782 case PGM_STATE_REC_RAM_ZERO:
2783 {
2784 if (PGM_PAGE_IS_ZERO(pPage))
2785 break;
2786
2787 /* Ballooned pages must be unmarked (live snapshot and
2788 teleportation scenarios). */
2789 if (PGM_PAGE_IS_BALLOONED(pPage))
2790 {
2791 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2792 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2793 break;
2794 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2795 break;
2796 }
2797
2798 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2799
2800 /* If this is a ROM page, we must clear it and not try to
2801 * free it. Ditto if the VM is using RamPreAlloc (see
2802 * @bugref{6318}). */
2803 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2804 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2805 || PGM_IS_IN_NEM_MODE(pVM)
2806 || pVM->pgm.s.fRamPreAlloc)
2807 {
2808 PGMPAGEMAPLOCK PgMpLck;
2809 void *pvDstPage;
2810 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2811 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2812
2813 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2814 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2815 }
2816 /* Free it only if it's not part of a previously
2817 allocated large page (no need to clear the page). */
2818 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2819 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2820 {
2821 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2822 AssertRCReturn(rc, rc);
2823 }
2824 /** @todo handle large pages (see @bugref{5545}) */
2825 break;
2826 }
2827
2828 case PGM_STATE_REC_RAM_BALLOONED:
2829 {
2830 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2831 if (PGM_PAGE_IS_BALLOONED(pPage))
2832 break;
2833
2834 /* We don't map ballooned pages in our shadow page tables, let's
2835 just free it if allocated and mark as ballooned. See @bugref{5515}. */
2836 if (PGM_PAGE_IS_ALLOCATED(pPage))
2837 {
2838 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2839 * @bugref{5545}). */
2840 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2841 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2842 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2843
2844 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2845 AssertRCReturn(rc, rc);
2846 }
2847 Assert(PGM_PAGE_IS_ZERO(pPage));
2848 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2849 break;
2850 }
2851
2852 case PGM_STATE_REC_RAM_RAW:
2853 {
2854 PGMPAGEMAPLOCK PgMpLck;
2855 void *pvDstPage;
2856 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2857 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2858 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2859 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2860 if (RT_FAILURE(rc))
2861 return rc;
2862 break;
2863 }
2864
2865 default:
2866 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2867 }
2868 id = UINT8_MAX;
2869 break;
2870 }
2871
2872 /*
2873 * MMIO2 page.
2874 */
2875 case PGM_STATE_REC_MMIO2_RAW:
2876 case PGM_STATE_REC_MMIO2_ZERO:
2877 {
2878 /*
2879                 * Get the ID + page number and resolve that into an MMIO2 page.
2880 */
2881 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2882 iPage++;
2883 else
2884 {
2885 SSMR3GetU8(pSSM, &id);
2886 rc = SSMR3GetU32(pSSM, &iPage);
2887 if (RT_FAILURE(rc))
2888 return rc;
2889 }
2890 if ( !pRegMmio2
2891 || pRegMmio2->idSavedState != id)
2892 {
2893 pMmio2RamRange = NULL;
2894 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
2895 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
2896 if (pVM->pgm.s.aMmio2Ranges[idx].idSavedState == id)
2897 {
2898 pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
2899 pMmio2RamRange = pVM->pgm.s.apMmio2RamRanges[idx];
2900 break;
2901 }
2902 AssertLogRelMsgReturn(pRegMmio2 && pMmio2RamRange, ("id=%#u iPage=%#x\n", id, iPage),
2903 VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2904 }
2905 AssertLogRelMsgReturn(iPage < (pMmio2RamRange->cb >> GUEST_PAGE_SHIFT),
2906 ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2RamRange->cb, pMmio2RamRange->pszDesc),
2907 VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2908 void * const pvDstPage = &pMmio2RamRange->pbR3[(size_t)iPage << GUEST_PAGE_SHIFT];
2909
2910 /*
2911 * Load the page bits.
2912 */
2913 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2914 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2915 else
2916 {
2917 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2918 if (RT_FAILURE(rc))
2919 return rc;
2920 }
2921 GCPhys = NIL_RTGCPHYS;
2922 break;
2923 }
2924
2925 /*
2926 * ROM pages.
2927 */
2928 case PGM_STATE_REC_ROM_VIRGIN:
2929 case PGM_STATE_REC_ROM_SHW_RAW:
2930 case PGM_STATE_REC_ROM_SHW_ZERO:
2931 case PGM_STATE_REC_ROM_PROT:
2932 {
2933 /*
2934                 * Get the ID + page number and resolve that into a ROM page descriptor.
2935 */
2936 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2937 iPage++;
2938 else
2939 {
2940 SSMR3GetU8(pSSM, &id);
2941 rc = SSMR3GetU32(pSSM, &iPage);
2942 if (RT_FAILURE(rc))
2943 return rc;
2944 }
2945 if ( !pRom
2946 || pRom->idSavedState != id)
2947 {
2948 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
2949 uint32_t idx;
2950 for (idx = 0; idx < cRomRanges; idx++)
2951 {
2952 pRom = pVM->pgm.s.apRomRanges[idx];
2953 if (pRom->idSavedState == id)
2954 break;
2955 }
2956 AssertLogRelMsgReturn(idx < cRomRanges, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2957 }
2958 AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT),
2959 ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc),
2960 VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2961 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2962 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
2963
2964 /*
2965 * Get and set the protection.
2966 */
2967 uint8_t u8Prot;
2968 rc = SSMR3GetU8(pSSM, &u8Prot);
2969 if (RT_FAILURE(rc))
2970 return rc;
2971 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2972 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2973
2974 if (enmProt != pRomPage->enmProt)
2975 {
2976 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2977 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2978 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2979 GCPhys, enmProt, pRom->pszDesc);
2980 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2981 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2982 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2983 }
2984 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2985 break; /* done */
2986
2987 /*
2988 * Get the right page descriptor.
2989 */
2990 PPGMPAGE pRealPage;
2991 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2992 {
2993 case PGM_STATE_REC_ROM_VIRGIN:
2994 if (!PGMROMPROT_IS_ROM(enmProt))
2995 pRealPage = &pRomPage->Virgin;
2996 else
2997 pRealPage = NULL;
2998 break;
2999
3000 case PGM_STATE_REC_ROM_SHW_RAW:
3001 case PGM_STATE_REC_ROM_SHW_ZERO:
3002 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
3003 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
3004 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
3005 GCPhys, enmProt, pRom->pszDesc);
3006 if (PGMROMPROT_IS_ROM(enmProt))
3007 pRealPage = &pRomPage->Shadow;
3008 else
3009 pRealPage = NULL;
3010 break;
3011
3012 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
3013 }
3014#ifdef VBOX_WITH_PGM_NEM_MODE
3015 bool const fAltPage = pRealPage != NULL;
3016#endif
3017 if (!pRealPage)
3018 {
3019 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
3020 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
3021 }
3022
3023 /*
3024 * Make it writable and map it (if necessary).
3025 */
3026 void *pvDstPage = NULL;
3027 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3028 {
3029 case PGM_STATE_REC_ROM_SHW_ZERO:
3030 if ( PGM_PAGE_IS_ZERO(pRealPage)
3031 || PGM_PAGE_IS_BALLOONED(pRealPage))
3032 break;
3033 /** @todo implement zero page replacing. */
3034 RT_FALL_THRU();
3035 case PGM_STATE_REC_ROM_VIRGIN:
3036 case PGM_STATE_REC_ROM_SHW_RAW:
3037#ifdef VBOX_WITH_PGM_NEM_MODE
3038 if (fAltPage && PGM_IS_IN_NEM_MODE(pVM))
3039                        pvDstPage = &pRom->pbR3Alternate[(size_t)iPage << GUEST_PAGE_SHIFT]; /* (size_t) cast matches the MMIO2 path above */
3040 else
3041#endif
3042 {
3043 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
3044 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
3045 }
3046 break;
3047 }
3048
3049 /*
3050 * Load the bits.
3051 */
3052 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3053 {
3054 case PGM_STATE_REC_ROM_SHW_ZERO:
3055 if (pvDstPage)
3056 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
3057 break;
3058
3059 case PGM_STATE_REC_ROM_VIRGIN:
3060 case PGM_STATE_REC_ROM_SHW_RAW:
3061 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
3062 if (RT_FAILURE(rc))
3063 return rc;
3064 break;
3065 }
3066 GCPhys = NIL_RTGCPHYS;
3067 break;
3068 }
3069
3070 /*
3071 * Unknown type.
3072 */
3073 default:
3074 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
3075 }
3076 } /* forever */
3077}
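
/*
 * Unlike the RAM records, the MMIO2 and ROM records decoded above address
 * pages by (range id, page index) instead of by GCPhys.  The pair is only
 * present when PGM_STATE_REC_FLAG_ADDR is set; otherwise the previous index
 * plus one is implied.  Addressed form (layout sketch):
 *
 *     uint8_t  u8;     -- record type | PGM_STATE_REC_FLAG_ADDR
 *     uint8_t  id;     -- idSavedState of the MMIO2/ROM range
 *     uint32_t iPage;  -- page index within that range
 */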
3078
3079
3080/**
3081 * Worker for pgmR3Load.
3082 *
3083 * @returns VBox status code.
3084 *
3085 * @param pVM The cross context VM structure.
3086 * @param pSSM The SSM handle.
3087 * @param uVersion The saved state version.
3088 */
3089static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
3090{
3091 PPGM pPGM = &pVM->pgm.s;
3092 int rc;
3093 uint32_t u32Sep;
3094
3095 /*
3096 * Load basic data (required / unaffected by relocation).
3097 */
3098 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
3099 {
3100 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
3101 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
3102 else
3103 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFieldsPreBalloon[0], NULL /*pvUser*/);
3104
3105 AssertLogRelRCReturn(rc, rc);
3106
3107 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3108 {
3109 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_PAE)
3110 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
3111 else
3112 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
3113 AssertLogRelRCReturn(rc, rc);
3114 }
3115 }
3116 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
3117 {
3118 AssertRelease(pVM->cCpus == 1);
3119
3120 PGMOLD pgmOld;
3121 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
3122 AssertLogRelRCReturn(rc, rc);
3123
3124 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3125 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
3126 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
3127 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
3128 }
3129 else
3130 {
3131 AssertRelease(pVM->cCpus == 1);
3132
3133 SSMR3Skip(pSSM, sizeof(bool));
3134 RTGCPTR GCPtrIgn;
3135 SSMR3GetGCPtr(pSSM, &GCPtrIgn);
3136 SSMR3Skip(pSSM, sizeof(uint32_t));
3137
3138 uint32_t cbRamSizeIgnored;
3139 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3140 if (RT_FAILURE(rc))
3141 return rc;
3142 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3143 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3144
3145 uint32_t u32 = 0;
3146 SSMR3GetUInt(pSSM, &u32);
3147 pVCpu0->pgm.s.fA20Enabled = !!u32;
3148 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3149 RTUINT uGuestMode;
3150 SSMR3GetUInt(pSSM, &uGuestMode);
3151 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3152
3153 /* check separator. */
3154        rc = SSMR3GetU32(pSSM, &u32Sep);
3155 if (RT_FAILURE(rc))
3156 return rc;
3157 if (u32Sep != (uint32_t)~0)
3158 {
3159 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3160 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3161 }
3162 }
3163
3164 /*
3165 * Fix the A20 mask.
3166 */
3167 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3168 {
3169 PVMCPU pVCpu = pVM->apCpusR3[i];
3170 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3171#ifdef VBOX_VMM_TARGET_X86
3172# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
3173 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3174# endif
3175#endif
3176 }
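    /* Worked example with illustrative values: fA20Enabled = false gives
       GCPhysA20Mask = ~((RTGCPHYS)1 << 20), i.e. 0x...ffffefffff, which
       forces physical address bit 20 to zero; fA20Enabled = true gives ~0
       (all bits set, so addresses pass through unchanged). */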
3177
3178 /*
3179 * The guest mappings - skipped now, see re-fixation in the caller.
3180 */
3181 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3182 {
3183 for (uint32_t i = 0; ; i++)
3184 {
3185 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3186 if (RT_FAILURE(rc))
3187 return rc;
3188 if (u32Sep == ~0U)
3189 break;
3190 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3191
3192 char szDesc[256];
3193 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3194 if (RT_FAILURE(rc))
3195 return rc;
3196 RTGCPTR GCPtrIgnore;
3197 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3198 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3199 if (RT_FAILURE(rc))
3200 return rc;
3201 }
3202 }
3203
3204 /*
3205 * Load the RAM contents.
3206 */
3207 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3208 {
3209 if (!pVM->pgm.s.LiveSave.fActive)
3210 {
3211 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3212 {
3213 rc = pgmR3LoadRamConfig(pVM, pSSM);
3214 if (RT_FAILURE(rc))
3215 return rc;
3216 }
3217 rc = pgmR3LoadRomRanges(pVM, pSSM);
3218 if (RT_FAILURE(rc))
3219 return rc;
3220 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3221 if (RT_FAILURE(rc))
3222 return rc;
3223 }
3224
3225 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3226 }
3227 else
3228 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3229
3230#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
3231 /* Refresh balloon accounting. */
3232 if (pVM->pgm.s.cBalloonedPages)
3233 {
3234 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3235 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3236 AssertRCReturn(rc, rc);
3237 }
3238#endif
3239 return rc;
3240}
3241
3242
3243/**
3244 * @callback_method_impl{FNSSMINTLOADEXEC}
3245 */
3246static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3247{
3248 int rc;
3249
3250 /*
3251 * Validate version.
3252 */
3253 if ( ( uPass != SSM_PASS_FINAL
3254 && uVersion != PGM_SAVED_STATE_VERSION
3255 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3256 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3257 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3258 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3259 || ( uVersion != PGM_SAVED_STATE_VERSION
3260 && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
3261 && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
3262 && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
3263 && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
3264 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
3265 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
3266 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
3267 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
3268 )
3269 {
3270 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
3271 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
3272 }
3273
3274 /*
3275 * Do the loading while owning the lock because a bunch of the functions
3276 * we're using requires this.
3277 */
3278 if (uPass != SSM_PASS_FINAL)
3279 {
3280 PGM_LOCK_VOID(pVM);
3281 if (uPass != 0)
3282 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3283 else
3284 {
3285 pVM->pgm.s.LiveSave.fActive = true;
3286 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3287 rc = pgmR3LoadRamConfig(pVM, pSSM);
3288 else
3289 rc = VINF_SUCCESS;
3290 if (RT_SUCCESS(rc))
3291 rc = pgmR3LoadRomRanges(pVM, pSSM);
3292 if (RT_SUCCESS(rc))
3293 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3294 if (RT_SUCCESS(rc))
3295 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
3296 }
3297 PGM_UNLOCK(pVM);
3298 }
3299 else
3300 {
3301 PGM_LOCK_VOID(pVM);
3302 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
3303 pVM->pgm.s.LiveSave.fActive = false;
3304 PGM_UNLOCK(pVM);
3305 if (RT_SUCCESS(rc))
3306 {
3307 /*
3308 * We require a full resync now.
3309 */
3310 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3311 {
3312 PVMCPU pVCpu = pVM->apCpusR3[i];
3313 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
3314 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3315 /** @todo For guest PAE, we might get the wrong
3316             * aGCPhysGstPaePDs values now. We should use the
3317             * saved ones... Postponing this since it's nothing new
3318 * and PAE/PDPTR needs some general readjusting, see
3319 * @bugref{5880}. */
3320 }
3321
3322 pgmR3HandlerPhysicalUpdateAll(pVM);
3323
3324 /*
3325 * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
3326 * (Requires the CPUM state to be restored already!)
3327 */
3328 if (CPUMR3IsStateRestorePending(pVM))
3329 return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
3330 N_("PGM was unexpectedly restored before CPUM"));
3331
3332 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3333 {
3334 PVMCPU pVCpu = pVM->apCpusR3[i];
3335
3336 /** @todo ARM VMs may have an invalid value here, since PGMMODE_NONE was
3337 * moved from 12 to 31. Thus far, though, this is a complete NOOP on
3338 * ARM and we still have very limited PGM functionality there (the
3339 * saved state is mostly X86-isms). */
3340 rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
3341 AssertLogRelRCReturn(rc, rc);
3342
3343#ifdef VBOX_VMM_TARGET_X86
3344 /* Update the PSE, NX flags and validity masks. */
3345 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
3346 PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
3347#endif
3348 }
3349 }
3350 }
3351
3352 return rc;
3353}
3354
3355
3356/**
3357 * @callback_method_impl{FNSSMINTLOADDONE}
3358 */
3359static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3360{
3361 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3362 NOREF(pSSM);
3363 return VINF_SUCCESS;
3364}
3365
3366
3367/**
3368 * Registers the saved state callbacks with SSM.
3369 *
3370 * @returns VBox status code.
3371 * @param pVM The cross context VM structure.
3372 * @param cbRam The RAM size.
3373 */
3374int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3375{
3376 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3377 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3378 NULL, pgmR3SaveExec, pgmR3SaveDone,
3379 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3380}
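
/*
 * Callback wiring note (matching the registration above, pfnSavePrep being
 * NULL): a live save runs pgmR3LivePrep once, then alternates pgmR3LiveExec
 * and pgmR3LiveVote per pass until the vote succeeds, and finishes with
 * pgmR3SaveExec and pgmR3SaveDone; a normal save goes straight to
 * pgmR3SaveExec/pgmR3SaveDone.  Loads mirror this with pgmR3LoadPrep,
 * pgmR3Load (per pass and final) and pgmR3LoadDone.
 */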
3381