VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp@ 105702

Last change on this file since 105702 was 104840, checked in by vboxsync, 6 months ago

VMM/PGM: Refactored RAM ranges, MMIO2 ranges and ROM ranges and added MMIO ranges (to PGM) so we can safely access RAM ranges at runtime w/o fear of them ever being freed up. It is now only possible to create these during VM creation and loading, and they will live till VM destruction (except for MMIO2 which could be destroyed during loading (PCNet fun)). The lookup handling is by table instead of pointer tree. No more ring-0 pointers in shared data. bugref:10687 bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 132.5 KB
Line 
1/* $Id: PGMSavedState.cpp 104840 2024-06-05 00:59:51Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/stam.h>
36#include <VBox/vmm/ssm.h>
37#include <VBox/vmm/pdmdrv.h>
38#include <VBox/vmm/pdmdev.h>
39#include "PGMInternal.h"
40#include <VBox/vmm/vmcc.h>
41#include "PGMInline.h"
42
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46#include <iprt/asm.h>
47#include <iprt/assert.h>
48#include <iprt/crc.h>
49#include <iprt/mem.h>
50#include <iprt/sha.h>
51#include <iprt/string.h>
52#include <iprt/thread.h>
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/** Saved state data unit version. */
59#define PGM_SAVED_STATE_VERSION 14
60/** Saved state data unit version before the PAE PDPE registers. */
61#define PGM_SAVED_STATE_VERSION_PRE_PAE 13
62/** Saved state data unit version after this includes ballooned page flags in
63 * the state (see @bugref{5515}). */
64#define PGM_SAVED_STATE_VERSION_BALLOON_BROKEN 12
65/** Saved state before the balloon change. */
66#define PGM_SAVED_STATE_VERSION_PRE_BALLOON 11
67/** Saved state data unit version used during 3.1 development, misses the RAM
68 * config. */
69#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG 10
70/** Saved state data unit version for 3.0 (pre teleportation). */
71#define PGM_SAVED_STATE_VERSION_3_0_0 9
72/** Saved state data unit version for 2.2.2 and later. */
73#define PGM_SAVED_STATE_VERSION_2_2_2 8
74/** Saved state data unit version for 2.2.0. */
75#define PGM_SAVED_STATE_VERSION_RR_DESC 7
76/** Saved state data unit version. */
77#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
78
79
80/** @name Sparse state record types
81 * @{ */
82/** Zero page. No data. */
83#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
84/** Raw page. */
85#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
86/** Raw MMIO2 page. */
87#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
88/** Zero MMIO2 page. */
89#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
90/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
91#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
92/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
93#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
94/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
95#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
96/** ROM protection (8-bit). */
97#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
98/** Ballooned page. No data. */
99#define PGM_STATE_REC_RAM_BALLOONED UINT8_C(0x08)
100/** The last record type. */
101#define PGM_STATE_REC_LAST PGM_STATE_REC_RAM_BALLOONED
102/** End marker. */
103#define PGM_STATE_REC_END UINT8_C(0xff)
104/** Flag indicating that the data is preceded by the page address.
105 * For RAW pages this is a RTGCPHYS. For MMIO2 and ROM pages this is a 8-bit
106 * range ID and a 32-bit page index.
107 */
108#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
109/** @} */
110
111/** The CRC-32 for a zero page. */
112#define PGM_STATE_CRC32_ZERO_PAGE UINT32_C(0xc71c0011)
113/** The CRC-32 for a zero half page. */
114#define PGM_STATE_CRC32_ZERO_HALF_PAGE UINT32_C(0xf1e8ba9e)
115
116
117
118/** @name Old Page types used in older saved states.
119 * @{ */
120/** Old saved state: The usual invalid zero entry. */
121#define PGMPAGETYPE_OLD_INVALID 0
122/** Old saved state: RAM page. (RWX) */
123#define PGMPAGETYPE_OLD_RAM 1
124/** Old saved state: MMIO2 page. (RWX) */
125#define PGMPAGETYPE_OLD_MMIO2 1
126/** Old saved state: MMIO2 page aliased over an MMIO page. (RWX)
127 * See PGMHandlerPhysicalPageAlias(). */
128#define PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO 2
129/** Old saved state: Shadowed ROM. (RWX) */
130#define PGMPAGETYPE_OLD_ROM_SHADOW 3
131/** Old saved state: ROM page. (R-X) */
132#define PGMPAGETYPE_OLD_ROM 4
133/** Old saved state: MMIO page. (---) */
134#define PGMPAGETYPE_OLD_MMIO 5
135/** @} */
136
137
138/*********************************************************************************************************************************
139* Structures and Typedefs *
140*********************************************************************************************************************************/
/** For loading old saved states. (pre-smp)
 *
 * Translation structure for the single per-VM blob older states stored;
 * newer states keep these values per-CPU.  Loaded via the s_aPGMFields_Old
 * field descriptors below.  The layout must not change, as SSM uses the
 * member offsets. */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE         enmGuestMode;
} PGMOLD;
160
161
162/*********************************************************************************************************************************
163* Global Variables *
164*********************************************************************************************************************************/
/** PGM fields to save/load (current saved state version). */
static const SSMFIELD s_aPGMFields[] =
{
    /* Obsolete fixed-mapping fields, kept as _OLD entries for saved state
       format compatibility. */
    SSMFIELD_ENTRY_OLD(         fMappingsFixed, sizeof(bool)),
    SSMFIELD_ENTRY_OLD_GCPTR(   GCPtrMappingFixed),
    SSMFIELD_ENTRY_OLD(         cbMappingFixed, sizeof(uint32_t)),
    /* The ballooned page count is the only live per-VM field. */
    SSMFIELD_ENTRY(             PGM, cBalloonedPages),
    SSMFIELD_ENTRY_TERM()
};
175
/** PGM fields to load from saved states that predate the ballooning
 * support (i.e. without the cBalloonedPages field). */
static const SSMFIELD s_aPGMFieldsPreBalloon[] =
{
    SSMFIELD_ENTRY_OLD(         fMappingsFixed, sizeof(bool)),
    SSMFIELD_ENTRY_OLD_GCPTR(   GCPtrMappingFixed),
    SSMFIELD_ENTRY_OLD(         cbMappingFixed, sizeof(uint32_t)),
    SSMFIELD_ENTRY_TERM()
};
183
/** Per-VCPU PGM fields to save/load (current version, including the four
 * guest PAE PDPE addresses). */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[0]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[1]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[2]),
    SSMFIELD_ENTRY(         PGMCPU, aGCPhysGstPaePDs[3]),
    SSMFIELD_ENTRY_TERM()
};
195
/** Per-VCPU PGM fields to load from saved states older than the addition of
 * the PAE PDPE registers (PGM_SAVED_STATE_VERSION_PRE_PAE). */
static const SSMFIELD s_aPGMCpuFieldsPrePae[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
203
/** Field descriptors for loading the PGMOLD structure from old (pre-SMP)
 * saved states. */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
214
215
216/**
217 * Find the ROM tracking structure for the given page.
218 *
219 * @returns Pointer to the ROM page structure. NULL if the caller didn't check
220 * that it's a ROM page.
221 * @param pVM The cross context VM structure.
222 * @param GCPhys The address of the ROM page.
223 */
224static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
225{
226 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
227 for (uint32_t idx = 0; idx < cRomRanges; idx++)
228 {
229 PPGMROMRANGE const pRomRange = pVM->pgm.s.apRomRanges[idx];
230 RTGCPHYS const off = GCPhys - pRomRange->GCPhys;
231 if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
232 return &pRomRange->aPages[off >> GUEST_PAGE_SHIFT];
233 }
234 return NULL;
235}
236
237
238/**
239 * Prepares the ROM pages for a live save.
240 *
241 * @returns VBox status code.
242 * @param pVM The cross context VM structure.
243 */
244static int pgmR3PrepRomPages(PVM pVM)
245{
246 /*
247 * Initialize the live save tracking in the ROM page descriptors.
248 */
249 PGM_LOCK_VOID(pVM);
250 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
251 for (uint32_t idx = 0; idx < cRomRanges; idx++)
252 {
253 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
254 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
255 PPGMRAMRANGE pRamHint = NULL;
256
257 for (uint32_t iPage = 0; iPage < cPages; iPage++)
258 {
259 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
260 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
261 pRom->aPages[iPage].LiveSave.fDirty = true;
262 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
263 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
264 {
265 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
266 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
267 else
268 {
269 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
270 PPGMPAGE pPage;
271 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
272 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
273 if (RT_SUCCESS(rc))
274 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
275 else
276 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
277 }
278 }
279 }
280
281 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
282 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
283 pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
284 }
285 PGM_UNLOCK(pVM);
286
287 return VINF_SUCCESS;
288}
289
290
291/**
292 * Assigns IDs to the ROM ranges and saves them.
293 *
294 * @returns VBox status code.
295 * @param pVM The cross context VM structure.
296 * @param pSSM Saved state handle.
297 */
298static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
299{
300 PGM_LOCK_VOID(pVM);
301 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
302 for (uint32_t idx = 0; idx < cRomRanges; idx++)
303 {
304 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
305 uint8_t const idSavedState = (uint8_t)(idx + 1);
306 pRom->idSavedState = idSavedState;
307 SSMR3PutU8(pSSM, idSavedState);
308 SSMR3PutStrZ(pSSM, ""); /* device name */
309 SSMR3PutU32(pSSM, 0); /* device instance */
310 SSMR3PutU8(pSSM, 0); /* region */
311 SSMR3PutStrZ(pSSM, pRom->pszDesc);
312 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
313 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
314 if (RT_FAILURE(rc))
315 break;
316 }
317 PGM_UNLOCK(pVM);
318 return SSMR3PutU8(pSSM, UINT8_MAX);
319}
320
321
/**
 * Loads the ROM range ID assignments.
 *
 * Reads the records written by pgmR3SaveRomRanges() and matches each one (by
 * description) against the ROM ranges registered with this VM, storing the
 * saved state ID in the matched range so subsequent page records can be
 * resolved.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Invalidate all current ID assignments so we can tell which ranges were
       matched when we reach the end marker. */
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
        pVM->pgm.s.apRomRanges[idx]->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /*
             * End of ROM ranges. Check that all are accounted for.
             */
            for (uint32_t idx = 0; idx < cRomRanges; idx++)
            {
                PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
                if (pRom->idSavedState != UINT8_MAX)
                { /* likely */ }
                else if (pRom->fFlags & PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE)
                    LogRel(("PGM: The '%s' ROM was not found in the saved state, but it is marked as maybe-missing, so that's probably okay.\n",
                            pRom->pszDesc));
                else
                    /* Condition is always false on this path; the assertion
                       exists purely to log/raise the complaint. */
                    AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                    ("The '%s' ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                     pRom->pszDesc));
            }
            return VINF_SUCCESS; /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /* Device name, instance and region: current saves write empty/zero
           values here (see pgmR3SaveRomRanges); this is enforced below. */
        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        /* Only the last Get status is checked here; earlier failures
           presumably stick in the SSM handle - see the SSM API docs. */
        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp LB %RGp %s\n", GCPhys, cb, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        uint32_t idx;
        for (idx = 0; idx < cRomRanges; idx++)
        {
            PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
            if (   pRom->idSavedState == UINT8_MAX
                && !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (idx >= cRomRanges)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp LB %RGp by the name '%s' was not found"),
                                    GCPhys, cb, szDesc);
    } /* forever */
}
415
416
417/**
418 * Scan ROM pages.
419 *
420 * @param pVM The cross context VM structure.
421 */
422static void pgmR3ScanRomPages(PVM pVM)
423{
424 /*
425 * The shadow ROMs.
426 */
427 PGM_LOCK_VOID(pVM);
428 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
429 for (uint32_t idx = 0; idx < cRomRanges; idx++)
430 {
431 PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
432 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
433 {
434 uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
435 for (uint32_t iPage = 0; iPage < cPages; iPage++)
436 {
437 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
438 if (pRomPage->LiveSave.fWrittenTo)
439 {
440 pRomPage->LiveSave.fWrittenTo = false;
441 if (!pRomPage->LiveSave.fDirty)
442 {
443 pRomPage->LiveSave.fDirty = true;
444 pVM->pgm.s.LiveSave.Rom.cReadyPages--;
445 pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
446 }
447 pRomPage->LiveSave.fDirtiedRecently = true;
448 }
449 else
450 pRomPage->LiveSave.fDirtiedRecently = false;
451 }
452 }
453 }
454 PGM_UNLOCK(pVM);
455}
456
457
/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
        uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor.
               When the ROM is the mapped copy, the live page at GCPhys is the
               virgin one; otherwise the virgin copy is parked in the Virgin
               member of the tracking structure. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(pVM, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[GUEST_PAGE_SIZE];
            if (   !PGM_PAGE_IS_ZERO(pPage)
                && !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                if (!PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM))
                    pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                else
#endif
                    rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
            }
            else
                RT_ZERO(abPage);
            /* Drop the PGM lock while doing SSM I/O; abPage is a stack copy,
               so nothing below depends on the page staying mapped. */
            PGM_UNLOCK(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it.  Only the first page of each range carries an explicit
               address record (range ID + page index); the following pages are
               implicitly sequential. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;  /* (the lock is not held at this point) */

            /* Update state. */
            PGM_LOCK_VOID(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
538
539
/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * In a live save a page is written once it is dirty and has neither been
 * dirtied recently nor written to since the last scan - or unconditionally in
 * the final pass.  In a non-live save (fLiveSave=false) every page is written.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
    for (uint32_t idx = 0; idx < cRomRanges; idx++)
    {
        PPGMROMRANGE const pRom = pVM->pgm.s.apRomRanges[idx];
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages = pRom->cb >> GUEST_PAGE_SHIFT;
            /* Start iPrevPage at an impossible value so the first page saved
               always gets an explicit address record. */
            uint32_t iPrevPage = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                             )
                         )
                    )
                {
                    uint8_t abPage[GUEST_PAGE_SIZE];
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                    /* When the ROM copy is the one mapped, the shadow is kept
                       in the Shadow member; otherwise the shadow is the live
                       page in the RAM range. */
                    PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys);
                    bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */
                    int rc = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
#ifdef VBOX_WITH_PGM_NEM_MODE
                        if (PGMROMPROT_IS_ROM(enmProt) && PGM_IS_IN_NEM_MODE(pVM))
                            pvPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
                        else
#endif
                            rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
                    }
                    /* Update the accounting while the lock is still held ... */
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    /* ... then drop it while doing SSM I/O (abPage is a stack copy). */
                    PGM_UNLOCK(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    /* A page directly following the previously saved one needs
                       no address record. */
                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    /* The protection byte precedes the raw bits (and is the
                       only payload for zero pages). */
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;  /* (the lock is not held at this point) */

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    PGM_UNLOCK(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    PGM_LOCK_VOID(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    PGM_UNLOCK(pVM);
    return VINF_SUCCESS;
}
657
658
659/**
660 * Cleans up ROM pages after a live save.
661 *
662 * @param pVM The cross context VM structure.
663 */
664static void pgmR3DoneRomPages(PVM pVM)
665{
666 NOREF(pVM);
667}
668
669
670/**
671 * Prepares the MMIO2 pages for a live save.
672 *
673 * @returns VBox status code.
674 * @param pVM The cross context VM structure.
675 */
676static int pgmR3PrepMmio2Pages(PVM pVM)
677{
678 /*
679 * Initialize the live save tracking in the MMIO2 ranges.
680 * ASSUME nothing changes here.
681 */
682 PGM_LOCK_VOID(pVM);
683 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
684 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
685 {
686 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
687 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
688 uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT;
689 PGM_UNLOCK(pVM);
690
691 PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM,
692 sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
693 if (!paLSPages)
694 return VERR_NO_MEMORY;
695 for (uint32_t iPage = 0; iPage < cPages; iPage++)
696 {
697 /* Initialize it as a dirty zero page. */
698 paLSPages[iPage].fDirty = true;
699 paLSPages[iPage].cUnchangedScans = 0;
700 paLSPages[iPage].fZero = true;
701 paLSPages[iPage].u32CrcH1 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
702 paLSPages[iPage].u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
703 }
704
705 PGM_LOCK_VOID(pVM);
706 pRegMmio2->paLSPages = paLSPages;
707 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
708 }
709 PGM_UNLOCK(pVM);
710 return VINF_SUCCESS;
711}
712
713
714/**
715 * Assigns IDs to the MMIO2 ranges and saves them.
716 *
717 * @returns VBox status code.
718 * @param pVM The cross context VM structure.
719 * @param pSSM Saved state handle.
720 */
721static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
722{
723 PGM_LOCK_VOID(pVM);
724 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
725 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
726 {
727 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
728 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
729 uint8_t const idSavedState = (uint8_t)(idx + 1);
730 pRegMmio2->idSavedState = idSavedState;
731 SSMR3PutU8(pSSM, idSavedState);
732 SSMR3PutStrZ(pSSM, pRegMmio2->pDevInsR3->pReg->szName);
733 SSMR3PutU32(pSSM, pRegMmio2->pDevInsR3->iInstance);
734 SSMR3PutU8(pSSM, pRegMmio2->iRegion);
735 SSMR3PutStrZ(pSSM, pRamRange->pszDesc);
736 int rc = SSMR3PutGCPhys(pSSM, pRamRange->cb);
737 if (RT_FAILURE(rc))
738 break;
739 }
740 PGM_UNLOCK(pVM);
741 return SSMR3PutU8(pSSM, UINT8_MAX);
742}
743
744
/**
 * Loads the MMIO2 range ID assignments.
 *
 * Reads the records written by pgmR3SaveMmio2Ranges() and matches each one
 * (by device name, instance and region) against the MMIO2 ranges registered
 * with this VM, storing the saved state ID in the matched range.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    The saved state handle.
 */
static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Invalidate all current ID assignments so we can check that every range
       got matched when we reach the end marker. */
    uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
    for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
        pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            /*
             * End of MMIO2 ranges. Check that all are accounted for.
             */
            for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
                AssertLogRelMsg(pVM->pgm.s.aMmio2Ranges[idx].idSavedState != UINT8_MAX,
                                ("%s\n", pVM->pgm.s.apMmio2RamRanges[idx]->pszDesc));
            return VINF_SUCCESS; /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelMsgReturn(!(cb & GUEST_PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching MMIO2 range.  The description is informational
         * only; the device name/instance/region triple identifies the range.
         */
        uint32_t idx;
        for (idx = 0; idx < cMmio2Ranges; idx++)
        {
            PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            if (   pRegMmio2->idSavedState == UINT8_MAX
                && pRegMmio2->iRegion == iRegion
                && pRegMmio2->pDevInsR3->iInstance == uInstance
                && !strcmp(pRegMmio2->pDevInsR3->pReg->szName, szDevName))
            {
                pRegMmio2->idSavedState = id;
                break;
            }
        }
        if (idx >= cMmio2Ranges)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
                                    szDesc, szDevName, uInstance, iRegion);

        /*
         * Validate the configuration, the size of the MMIO2 region should be
         * the same.  A smaller saved size only gets a release log note; a
         * larger one is a hard configuration error.
         */
        PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
        if (cb != pRamRange->cb)
        {
            LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n", pRamRange->pszDesc, cb, pRamRange->cb));
            if (cb > pRamRange->cb) /* bad idea? */
                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
                                        pRamRange->pszDesc, cb, pRamRange->cb);
        }
    } /* forever */
}
834
835
/**
 * Scans one MMIO2 page.
 *
 * The check is deliberately cheap: non-zero pages are CRC-32'ed in two
 * halves and only the second half is checked when the first matches, so a
 * modification in the second half may only be noticed on a later scan.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pbPage          The page bits.
 * @param   pLSPage         The live save tracking structure for the page.
 *
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        /* No longer zero: prime the first-half CRC and fall through to the
           dirty path (the second half gets CRCed on a later scan). */
        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, GUEST_PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + GUEST_PAGE_SIZE / 2, GUEST_PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            /* If the first half CRCs like a zero half-page, the page may have
               become all zero again - verify and flip the state back if so. */
            if (   u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                && ASMMemIsZero(pbPage, GUEST_PAGE_SIZE))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}
909
910
911/**
912 * Scan for MMIO2 page modifications.
913 *
914 * @param pVM The cross context VM structure.
915 * @param uPass The pass number.
916 */
917static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
918{
919 /*
920 * Since this is a bit expensive we lower the scan rate after a little while.
921 */
922 if ( ( (uPass & 3) != 0
923 && uPass > 10)
924 || uPass == SSM_PASS_FINAL)
925 return;
926
927 PGM_LOCK_VOID(pVM); /* paranoia */
928 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
929 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
930 {
931 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
932 PPGMLIVESAVEMMIO2PAGE paLSPages = pRegMmio2->paLSPages;
933 uint32_t cPages = pVM->pgm.s.apMmio2RamRanges[idx]->cb >> GUEST_PAGE_SHIFT;
934 PGM_UNLOCK(pVM);
935
936 for (uint32_t iPage = 0; iPage < cPages; iPage++)
937 {
938 uint8_t const *pbPage = &pRegMmio2->pbR3[iPage * GUEST_PAGE_SIZE];
939 pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
940 }
941
942 PGM_LOCK_VOID(pVM);
943 }
944 PGM_UNLOCK(pVM);
945
946}
947
948
/**
 * Save quiescent MMIO2 pages.
 *
 * Writes PGM_STATE_REC_MMIO2_ZERO / PGM_STATE_REC_MMIO2_RAW records to the
 * stream.  Consecutive pages use a compact record without the address; a
 * record with PGM_STATE_REC_FLAG_ADDR carries the range's saved-state ID and
 * the page index.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether it's a live save or not.
 * @param   uPass       The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.  (VCPUs are not running in the final pass, so the
         * lock is held across the SSM writes here.)
         */
        PGM_LOCK_VOID(pVM);
        uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
        for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++)
        {
            PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
            PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages;
            uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT;
            uint8_t const *pbPage = pRamRange->pbR3;
            /* iPageLast starts out of range so the first record always carries an address. */
            uint32_t iPageLast = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZero(pbPage, GUEST_PAGE_SIZE) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try figure if it's a clean page, compare the SHA-1 to be really sure. */
                    if (   !paLSPages[iPage].fDirty
                        && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, GUEST_PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                }

                /* Compact record for pages that directly follow the previous one. */
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    /* SSM latches the first error internally, so only the last rc is checked. */
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        PGM_UNLOCK(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        PGM_LOCK_VOID(pVM);
        uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
        for (uint32_t idx = 0; idx < cMmio2Ranges && RT_SUCCESS(rc); idx++)
        {
            PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
            PPGMRAMRANGE const pRamRange = pVM->pgm.s.apMmio2RamRanges[idx];
            PPGMLIVESAVEMMIO2PAGE const paLSPages = pRegMmio2->paLSPages;
            uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT;
            uint8_t const *pbPage = pRamRange->pbR3;
            uint32_t iPageLast = cPages;
            /* SSM calls may block on I/O, so write with the lock released. */
            PGM_UNLOCK(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += GUEST_PAGE_SIZE)
            {
                /* Skip clean pages and pages which haven't quiesced yet. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it.  Snapshot the page first; the guest may be writing it. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t abPage[GUEST_PAGE_SIZE];
                if (!fZero)
                {
                    memcpy(abPage, pbPage, GUEST_PAGE_SIZE);
                    RTSha1(abPage, GUEST_PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pRegMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                    pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
                iPageLast = iPage;
            }

            PGM_LOCK_VOID(pVM);
        }
        PGM_UNLOCK(pVM);
    }

    return rc;
}
1089
1090
1091/**
1092 * Cleans up MMIO2 pages after a live save.
1093 *
1094 * @param pVM The cross context VM structure.
1095 */
1096static void pgmR3DoneMmio2Pages(PVM pVM)
1097{
1098 /*
1099 * Free the tracking structures for the MMIO2 pages.
1100 * We do the freeing outside the lock in case the VM is running.
1101 */
1102 PGM_LOCK_VOID(pVM);
1103 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
1104 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
1105 {
1106 PPGMREGMMIO2RANGE const pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
1107 void *pvMmio2ToFree = pRegMmio2->paLSPages;
1108 if (pvMmio2ToFree)
1109 {
1110 pRegMmio2->paLSPages = NULL;
1111 PGM_UNLOCK(pVM);
1112 MMR3HeapFree(pvMmio2ToFree);
1113 PGM_LOCK_VOID(pVM);
1114 }
1115 }
1116 PGM_UNLOCK(pVM);
1117}
1118
1119
/**
 * Prepares the RAM pages for a live save.
 *
 * Allocates and initializes a PGMLIVESAVERAMPAGE tracking array for every
 * non-ad-hoc RAM range that doesn't have one yet.  All trackable pages start
 * out dirty; ROM/MMIO pages are flagged fIgnore.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int pgmR3PrepRamPages(PVM pVM)
{

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PGM_LOCK_VOID(pVM);
    uint32_t idRamRange;
    do
    {
        uint32_t const idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
        for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
        {
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            Assert(pCur || idRamRange == 0);
            if (!pCur) continue;
            Assert(pCur->idRange == idRamRange);

            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                /* Remember the generation so we can detect concurrent range changes
                   while we're allocating outside the lock. */
                uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
                uint32_t const cPages = pCur->cb >> GUEST_PAGE_SHIFT;
                PGM_UNLOCK(pVM);
                PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                PGM_LOCK_VOID(pVM);
                if (pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen)
                {
                    /* The range list changed while we were unlocked: ditch the
                       allocation and restart the whole outer loop. */
                    PGM_UNLOCK(pVM);
                    MMR3HeapFree(paLSPages);
                    PGM_LOCK_VOID(pVM);
                    break; /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].cDirtied = 0;
                    paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (   PGM_PAGE_IS_ZERO(pPage)
                                || PGM_PAGE_IS_BALLOONED(pPage))
                            {
                                paLSPages[iPage].fZero = 1;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc = UINT32_MAX;
#endif
                            }
                            else
                            {
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc = UINT32_MAX;
#endif
                            }
                            paLSPages[iPage].fIgnore = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            /* ROM pages are handled by the ROM saving code, not here. */
                            paLSPages[iPage].fZero = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            RT_FALL_THRU();
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            /* MMIO2 is saved separately (pgmR3SaveMmio2Pages). */
                            paLSPages[iPage].fZero = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                        case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
                            /* Pure MMIO has no backing to save. */
                            paLSPages[iPage].fZero = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
        /* Loop again only if the inner loop broke out early (generation change). */
    } while (idRamRange <= RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U));
    PGM_UNLOCK(pVM);

    return VINF_SUCCESS;
}
1264
1265
1266/**
1267 * Saves the RAM configuration.
1268 *
1269 * @returns VBox status code.
1270 * @param pVM The cross context VM structure.
1271 * @param pSSM The saved state handle.
1272 */
1273static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
1274{
1275 uint32_t cbRamHole = 0;
1276 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
1277 AssertRCReturn(rc, rc);
1278
1279 uint64_t cbRam = 0;
1280 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
1281 AssertRCReturn(rc, rc);
1282
1283 SSMR3PutU32(pSSM, cbRamHole);
1284 return SSMR3PutU64(pSSM, cbRam);
1285}
1286
1287
1288/**
1289 * Loads and verifies the RAM configuration.
1290 *
1291 * @returns VBox status code.
1292 * @param pVM The cross context VM structure.
1293 * @param pSSM The saved state handle.
1294 */
1295static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
1296{
1297 uint32_t cbRamHoleCfg = 0;
1298 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
1299 AssertRCReturn(rc, rc);
1300
1301 uint64_t cbRamCfg = 0;
1302 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
1303 AssertRCReturn(rc, rc);
1304
1305 uint32_t cbRamHoleSaved;
1306 SSMR3GetU32(pSSM, &cbRamHoleSaved);
1307
1308 uint64_t cbRamSaved;
1309 rc = SSMR3GetU64(pSSM, &cbRamSaved);
1310 AssertRCReturn(rc, rc);
1311
1312 if ( cbRamHoleCfg != cbRamHoleSaved
1313 || cbRamCfg != cbRamSaved)
1314 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
1315 cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
1316 return VINF_SUCCESS;
1317}
1318
1319#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1320
1321/**
1322 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
1323 * info with it.
1324 *
1325 * @param pVM The cross context VM structure.
1326 * @param pCur The current RAM range.
1327 * @param paLSPages The current array of live save page tracking
1328 * structures.
1329 * @param iPage The page index.
1330 */
1331static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
1332{
1333 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1334 PGMPAGEMAPLOCK PgMpLck;
1335 void const *pvPage;
1336 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
1337 if (RT_SUCCESS(rc))
1338 {
1339 paLSPages[iPage].u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
1340 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1341 }
1342 else
1343 paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
1344}
1345
1346
/**
 * Verifies the CRC-32 for a page given its raw bits.
 *
 * @param   pvPage      The page bits.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Caller tag included in the assertion message.
 */
static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    /* UINT32_MAX means no valid CRC has been recorded for this page. */
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        uint32_t u32Crc = RTCrc32(pvPage, GUEST_PAGE_SIZE);
        /* A zero/ballooned page must hash to the well-known zero page CRC. */
        Assert(   (   !PGM_PAGE_IS_ZERO(&pCur->aPages[iPage])
                   && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]))
               || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
        AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
                  ("%08x != %08x for %RGp %R[pgmpage] %s\n", paLSPages[iPage].u32Crc, u32Crc,
                   pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage], pszWhere));
    }
}
1369
1370
/**
 * Verifies the CRC-32 for a RAM page.
 *
 * Maps the page read-only and defers to pgmR3StateVerifyCrc32ForPage; a page
 * that cannot be mapped is silently skipped.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The current RAM range.
 * @param   paLSPages   The current array of live save page tracking
 *                      structures.
 * @param   iPage       The page index.
 * @param   pszWhere    Caller tag included in the assertion message.
 */
static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage, const char *pszWhere)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
        PGMPAGEMAPLOCK PgMpLck;
        void const *pvPage;
        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage, &PgMpLck);
        if (RT_SUCCESS(rc))
        {
            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage, pszWhere);
            pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
        }
    }
}
1395
1396#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */
1397
/**
 * Scan for RAM page modifications and reprotect them.
 *
 * Walks all RAM ranges via the lookup table, updates the live-save tracking
 * state (dirty/zero/shared flags and counters) from the current PGM page
 * state, and re-enables write monitoring on allocated pages.
 *
 * @param   pVM         The cross context VM structure.
 * @param   fFinalPass  Whether this is the final pass or not.
 */
static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysCur = 0;
    uint32_t idxLookup;
    uint32_t cLookupEntries;
    PGM_LOCK_VOID(pVM);
    do
    {
        /* Snapshot generation+count so we can detect range changes after yielding. */
        PGM::PGMRAMRANGEGENANDLOOKUPCOUNT const RamRangeUnion = pVM->pgm.s.RamRangeUnion;
        Assert(pVM->pgm.s.RamRangeUnion.cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
        cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
        for (idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
        {
            uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
            AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            AssertContinue(pCur);
            Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));

            /* GCPhysCur > 0 means we're resuming after a restart: skip ranges
               already processed. */
            if (   pCur->GCPhysLast > GCPhysCur
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
                uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (   !fFinalPass
#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
                        && (iPage & 0x7ff) == 0x100
#endif
                        && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
                        && pVM->pgm.s.RamRangeUnion.u64Combined != RamRangeUnion.u64Combined)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
                        break; /* restart */
                    }

                    /* Skip already ignored pages. */
                    if (paLSPages[iPage].fIgnore)
                        continue;

                    if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
                    {
                        /*
                         * A RAM page.
                         */
                        switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                /** @todo Optimize this: Don't always re-enable write
                                 *        monitoring if the page is known to be very busy. */
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    AssertMsg(paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
                                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                else
                                {
                                    AssertMsg(!paLSPages[iPage].fWriteMonitored,
                                              ("%RGp %R[pgmpage]\n", pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT), &pCur->aPages[iPage]));
                                    pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
                                }

                                if (!paLSPages[iPage].fDirty)
                                {
                                    pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                    if (paLSPages[iPage].fZero)
                                        pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                    pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    /* cDirtied saturates at PGMLIVSAVEPAGE_MAX_DIRTIED. */
                                    if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                        paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                }

                                pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
                                                        pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT));
                                paLSPages[iPage].fWriteMonitored = 1;
                                paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                paLSPages[iPage].fDirty = 1;
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
#endif
                                break;

                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                Assert(paLSPages[iPage].fWriteMonitored);
                                if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
                                {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    if (paLSPages[iPage].fWriteMonitoredJustNow)
                                        pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
                                    else
                                        pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "scan");
#endif
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                }
                                else
                                {
                                    /* Write-locked page: treat it as freshly monitored / dirty. */
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = UINT32_MAX; /* invalid */
#endif
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }
                                }
                                break;

                            case PGM_PAGE_STATE_ZERO:
                            case PGM_PAGE_STATE_BALLOONED:
                                if (!paLSPages[iPage].fZero)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero = 1;
                                    paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                                }
                                break;

                            case PGM_PAGE_STATE_SHARED:
                                if (!paLSPages[iPage].fShared)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        if (paLSPages[iPage].fZero)
                                            pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero = 0;
                                    paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                                }
                                break;
                        }
                    }
                    else
                    {
                        /*
                         * All other types => Ignore the page.
                         */
                        Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
                        paLSPages[iPage].fIgnore = 1;
                        if (paLSPages[iPage].fWriteMonitored)
                        {
                            /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
                             *        pages! */
                            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                            {
                                AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                PGM_PAGE_SET_STATE(pVM, &pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                Assert(pVM->pgm.s.cMonitoredPages > 0);
                                pVM->pgm.s.cMonitoredPages--;
                            }
                            if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                            {
                                PGM_PAGE_CLEAR_WRITTEN_TO(pVM, &pCur->aPages[iPage]);
                                Assert(pVM->pgm.s.cWrittenToPages > 0);
                                pVM->pgm.s.cWrittenToPages--;
                            }
                            pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
                        }

                        /** @todo the counting doesn't quite work out here. fix later? */
                        if (paLSPages[iPage].fDirty)
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        else
                        {
                            pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                            if (paLSPages[iPage].fZero)
                                pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                        }
                        pVM->pgm.s.LiveSave.cIgnoredPages++;
                    }
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */

        /* We must use the starting lookup count here to determine whether we've
           been thru all or not, since using the current count could lead us to
           skip the final range if one was unmapped while we yielded the lock. */
    } while (idxLookup < cLookupEntries);
    PGM_UNLOCK(pVM);
}
1616
1617
1618/**
1619 * Save quiescent RAM pages.
1620 *
1621 * @returns VBox status code.
1622 * @param pVM The cross context VM structure.
1623 * @param pSSM The SSM handle.
1624 * @param fLiveSave Whether it's a live save or not.
1625 * @param uPass The pass number.
1626 */
1627static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
1628{
1629 NOREF(fLiveSave);
1630
1631 /*
1632 * The RAM.
1633 */
1634 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1635 RTGCPHYS GCPhysCur = 0;
1636 uint32_t idxLookup;
1637 uint32_t cRamRangeLookupEntries;
1638
1639 PGM_LOCK_VOID(pVM);
1640 do
1641 {
1642 uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
1643 cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
1644 for (idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++)
1645 {
1646 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
1647 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
1648 PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
1649 AssertContinue(pCur);
1650 Assert(pCur->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));
1651
1652 if ( pCur->GCPhysLast > GCPhysCur
1653 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1654 {
1655 PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
1656 uint32_t cPages = pCur->cb >> GUEST_PAGE_SHIFT;
1657 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> GUEST_PAGE_SHIFT;
1658 GCPhysCur = 0;
1659 for (; iPage < cPages; iPage++)
1660 {
1661 /* Do yield first. */
1662 if ( uPass != SSM_PASS_FINAL
1663 && (iPage & 0x7ff) == 0x100
1664 && PDMR3CritSectYield(pVM, &pVM->pgm.s.CritSectX)
1665 && pVM->pgm.s.RamRangeUnion.idGeneration != idRamRangesGen)
1666 {
1667 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1668 break; /* restart */
1669 }
1670
1671 PPGMPAGE pCurPage = &pCur->aPages[iPage];
1672
1673 /*
1674 * Only save pages that haven't changed since last scan and are dirty.
1675 */
1676 if ( uPass != SSM_PASS_FINAL
1677 && paLSPages)
1678 {
1679 if (!paLSPages[iPage].fDirty)
1680 continue;
1681 if (paLSPages[iPage].fWriteMonitoredJustNow)
1682 continue;
1683 if (paLSPages[iPage].fIgnore)
1684 continue;
1685 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM) /* in case of recent remappings */
1686 continue;
1687 if ( PGM_PAGE_GET_STATE(pCurPage)
1688 != ( paLSPages[iPage].fZero
1689 ? PGM_PAGE_STATE_ZERO
1690 : paLSPages[iPage].fShared
1691 ? PGM_PAGE_STATE_SHARED
1692 : PGM_PAGE_STATE_WRITE_MONITORED))
1693 continue;
1694 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
1695 continue;
1696 }
1697 else
1698 {
1699 if ( paLSPages
1700 && !paLSPages[iPage].fDirty
1701 && !paLSPages[iPage].fIgnore)
1702 {
1703#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1704 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1705 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#1");
1706#endif
1707 continue;
1708 }
1709 if (PGM_PAGE_GET_TYPE(pCurPage) != PGMPAGETYPE_RAM)
1710 continue;
1711 }
1712
1713 /*
1714 * Do the saving outside the PGM critsect since SSM may block on I/O.
1715 */
1716 int rc;
1717 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
1718 bool fZero = PGM_PAGE_IS_ZERO(pCurPage);
1719 bool fBallooned = PGM_PAGE_IS_BALLOONED(pCurPage);
1720 bool fSkipped = false;
1721
1722 if (!fZero && !fBallooned)
1723 {
1724 /*
1725 * Copy the page and then save it outside the lock (since any
1726 * SSM call may block).
1727 */
1728 uint8_t abPage[GUEST_PAGE_SIZE];
1729 PGMPAGEMAPLOCK PgMpLck;
1730 void const *pvPage;
1731 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pCurPage, GCPhys, &pvPage, &PgMpLck);
1732 if (RT_SUCCESS(rc))
1733 {
1734 memcpy(abPage, pvPage, GUEST_PAGE_SIZE);
1735#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1736 if (paLSPages)
1737 pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage, "save#3");
1738#endif
1739 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1740 }
1741 PGM_UNLOCK(pVM);
1742 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
1743
1744 /* Try save some memory when restoring. */
1745 if (!ASMMemIsZero(pvPage, GUEST_PAGE_SIZE))
1746 {
1747 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1748 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
1749 else
1750 {
1751 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
1752 SSMR3PutGCPhys(pSSM, GCPhys);
1753 }
1754 rc = SSMR3PutMem(pSSM, abPage, GUEST_PAGE_SIZE);
1755 }
1756 else
1757 {
1758 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1759 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
1760 else
1761 {
1762 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
1763 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1764 }
1765 }
1766 }
1767 else
1768 {
1769 /*
1770 * Dirty zero or ballooned page.
1771 */
1772#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1773 if (paLSPages)
1774 pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage, "save#2");
1775#endif
1776 PGM_UNLOCK(pVM);
1777
1778 uint8_t u8RecType = fBallooned ? PGM_STATE_REC_RAM_BALLOONED : PGM_STATE_REC_RAM_ZERO;
1779 if (GCPhys == GCPhysLast + GUEST_PAGE_SIZE)
1780 rc = SSMR3PutU8(pSSM, u8RecType);
1781 else
1782 {
1783 SSMR3PutU8(pSSM, u8RecType | PGM_STATE_REC_FLAG_ADDR);
1784 rc = SSMR3PutGCPhys(pSSM, GCPhys);
1785 }
1786 }
1787 if (RT_FAILURE(rc))
1788 return rc;
1789
1790 PGM_LOCK_VOID(pVM);
1791 if (!fSkipped)
1792 GCPhysLast = GCPhys;
1793 if (paLSPages)
1794 {
1795 paLSPages[iPage].fDirty = 0;
1796 pVM->pgm.s.LiveSave.Ram.cReadyPages++;
1797 if (fZero)
1798 pVM->pgm.s.LiveSave.Ram.cZeroPages++;
1799 pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
1800 pVM->pgm.s.LiveSave.cSavedPages++;
1801 }
1802 if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration)
1803 {
1804 GCPhysCur = GCPhys | GUEST_PAGE_OFFSET_MASK;
1805 break; /* restart */
1806 }
1807
1808 } /* for each page in range */
1809
1810 if (GCPhysCur != 0)
1811 break; /* Yield + ramrange change */
1812 GCPhysCur = pCur->GCPhysLast;
1813 }
1814 } /* for each range */
1815
1816 /* We must use the starting lookup count here to determine whether we've
1817 been thru all or not, since using the current count could lead us to
1818 skip the final range if one was umapped while we yielded the lock. */
1819 } while (idxLookup < cRamRangeLookupEntries);
1820
1821 PGM_UNLOCK(pVM);
1822
1823 return VINF_SUCCESS;
1824}
1825
1826
/**
 * Cleans up RAM pages after a live save.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pgmR3DoneRamPages(PVM pVM)
{
    /*
     * Free the tracking arrays and disable write monitoring.
     *
     * Play nice with the PGM lock in case we're called while the VM is still
     * running.  This means we have to delay the freeing since we wish to use
     * paLSPages as an indicator of which RAM ranges which we need to scan for
     * write monitored pages.
     */
    void *pvToFree = NULL;
    uint32_t cMonitoredPages = 0;
    uint32_t idRamRangeMax;
    uint32_t idRamRange;
    PGM_LOCK_VOID(pVM);
    do
    {
        idRamRangeMax = RT_MIN(pVM->pgm.s.idRamRangeMax, RT_ELEMENTS(pVM->pgm.s.apRamRanges) - 1U);
        for (idRamRange = 0; idRamRange <= idRamRangeMax; idRamRange++)
        {
            PPGMRAMRANGE const pCur = pVM->pgm.s.apRamRanges[idRamRange];
            Assert(pCur || idRamRange == 0);
            if (!pCur) continue;
            Assert(pCur->idRange == idRamRange);

            if (pCur->paLSPages)
            {
                /* Free the previous range's array with the lock dropped; restart
                   the outer loop if the range list changed meanwhile. */
                if (pvToFree)
                {
                    uint32_t const idRamRangesGen = pVM->pgm.s.RamRangeUnion.idGeneration;
                    PGM_UNLOCK(pVM);
                    MMR3HeapFree(pvToFree);
                    pvToFree = NULL;
                    PGM_LOCK_VOID(pVM);
                    if (idRamRangesGen != pVM->pgm.s.RamRangeUnion.idGeneration)
                        break; /* start over again. */
                }

                pvToFree = pCur->paLSPages;
                pCur->paLSPages = NULL;

                /* Turn off write monitoring for this range's pages. */
                uint32_t iPage = pCur->cb >> GUEST_PAGE_SHIFT;
                while (iPage--)
                {
                    PPGMPAGE pPage = &pCur->aPages[iPage];
                    PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
                    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
                    {
                        PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
                        cMonitoredPages++;
                    }
                }
            }
        }
    } while (idRamRange <= idRamRangeMax);

    /* Adjust the global monitored page count, clamping at zero. */
    Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
    if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
        pVM->pgm.s.cMonitoredPages = 0;
    else
        pVM->pgm.s.cMonitoredPages -= cMonitoredPages;

    PGM_UNLOCK(pVM);

    /* Free the last pending array (MMR3HeapFree accepts NULL). */
    MMR3HeapFree(pvToFree);
    pvToFree = NULL;
}
1899
1900
/**
 * @callback_method_impl{FNSSMINTLIVEEXEC}
 *
 * Executes one live-save pass: writes the configuration/range tables in pass
 * 0, scans ROM/MMIO2/RAM for modifications, and saves the quiescent pages.
 */
static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    int rc;

    /*
     * Save the MMIO2 and ROM range IDs in pass 0.
     */
    if (uPass == 0)
    {
        rc = pgmR3SaveRamConfig(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
        rc = pgmR3SaveRomRanges(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
        rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
    }
    /*
     * Reset the page-per-second estimate to avoid inflation by the initial
     * load of zero pages.  pgmR3LiveVote ASSUMES this is done at pass 7.
     */
    else if (uPass == 7)
    {
        pVM->pgm.s.LiveSave.cSavedPages = 0;
        pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
    }

    /*
     * Do the scanning.
     */
    pgmR3ScanRomPages(pVM);
    pgmR3ScanMmio2Pages(pVM, uPass);
    pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
    pgmR3PoolClearAll(pVM, true /*fFlushRemTlb*/); /** @todo this could perhaps be optimized a bit. */

    /*
     * Save the pages.  (Virgin ROM pages only exist in the stream once, in pass 0.)
     */
    if (uPass == 0)
        rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
    else
        rc = VINF_SUCCESS;
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveMmio2Pages(     pVM, pSSM, true /*fLiveSave*/, uPass);
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveRamPages(       pVM, pSSM, true /*fLiveSave*/, uPass);
    /* Always terminate the pass, even on failure. */
    SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */

    return rc;
}
1958
1959
1960/**
1961 * @callback_method_impl{FNSSMINTLIVEVOTE}
1962 */
1963static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1964{
1965 /*
1966 * Update and calculate parameters used in the decision making.
1967 */
1968 const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);
1969
1970 /* update history. */
1971 PGM_LOCK_VOID(pVM);
1972 uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
1973 PGM_UNLOCK(pVM);
1974 uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
1975 + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
1976 + pVM->pgm.s.LiveSave.Ram.cDirtyPages
1977 + cWrittenToPages;
1978 uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
1979 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
1980 pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;
1981
1982 /* calc shortterm average (4 passes). */
1983 AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
1984 uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1985 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
1986 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
1987 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
1988 uint32_t const cDirtyPagesShort = cTotal / 4;
1989 pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;
1990
1991 /* calc longterm average. */
1992 cTotal = 0;
1993 if (uPass < cHistoryEntries)
1994 for (i = 0; i < cHistoryEntries && i <= uPass; i++)
1995 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1996 else
1997 for (i = 0; i < cHistoryEntries; i++)
1998 cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
1999 uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
2000 pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;
2001
2002 /* estimate the speed */
2003 uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
2004 uint32_t cPagesPerSecond = (uint32_t)( (long double)pVM->pgm.s.LiveSave.cSavedPages
2005 / ((long double)cNsElapsed / 1000000000.0) );
2006 pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;
2007
2008 /*
2009 * Try make a decision.
2010 */
2011 if ( cDirtyPagesShort <= cDirtyPagesLong
2012 && ( cDirtyNow <= cDirtyPagesShort
2013 || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
2014 )
2015 )
2016 {
2017 if (uPass > 10)
2018 {
2019 uint32_t cMsLeftShort = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
2020 uint32_t cMsLeftLong = (uint32_t)(cDirtyPagesLong / (long double)cPagesPerSecond * 1000.0);
2021 uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
2022 if (cMsMaxDowntime < 32)
2023 cMsMaxDowntime = 32;
2024 if ( ( cMsLeftLong <= cMsMaxDowntime
2025 && cMsLeftShort < cMsMaxDowntime)
2026 || cMsLeftShort < cMsMaxDowntime / 2
2027 )
2028 {
2029 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
2030 uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
2031 return VINF_SUCCESS;
2032 }
2033 }
2034 else
2035 {
2036 if ( ( cDirtyPagesShort <= 128
2037 && cDirtyPagesLong <= 1024)
2038 || cDirtyPagesLong <= 256
2039 )
2040 {
2041 Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
2042 return VINF_SUCCESS;
2043 }
2044 }
2045 }
2046
2047 /*
2048 * Come up with a completion percentage. Currently this is a simple
2049 * dirty page (long term) vs. total pages ratio + some pass trickery.
2050 */
2051 unsigned uPctDirty = (unsigned)( (long double)cDirtyPagesLong
2052 / (pVM->pgm.s.cAllPages - pVM->pgm.s.LiveSave.cIgnoredPages - pVM->pgm.s.cZeroPages) );
2053 if (uPctDirty <= 100)
2054 SSMR3HandleReportLivePercent(pSSM, RT_MIN(100 - uPctDirty, uPass * 2));
2055 else
2056 AssertMsgFailed(("uPctDirty=%u cDirtyPagesLong=%#x cAllPages=%#x cIgnoredPages=%#x cZeroPages=%#x\n",
2057 uPctDirty, cDirtyPagesLong, pVM->pgm.s.cAllPages, pVM->pgm.s.LiveSave.cIgnoredPages, pVM->pgm.s.cZeroPages));
2058
2059 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
2060}
2061
2062
2063/**
2064 * @callback_method_impl{FNSSMINTLIVEPREP}
2065 *
2066 * This will attempt to allocate and initialize the tracking structures. It
2067 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
2068 * pgmR3SaveDone will do the cleanups.
2069 */
2070static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
2071{
2072 /*
2073 * Indicate that we will be using the write monitoring.
2074 */
2075 PGM_LOCK_VOID(pVM);
2076 /** @todo find a way of mediating this when more users are added. */
2077 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
2078 {
2079 PGM_UNLOCK(pVM);
2080 AssertLogRelFailedReturn(VERR_PGM_WRITE_MONITOR_ENGAGED);
2081 }
2082 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
2083 PGM_UNLOCK(pVM);
2084
2085 /*
2086 * Initialize the statistics.
2087 */
2088 pVM->pgm.s.LiveSave.Rom.cReadyPages = 0;
2089 pVM->pgm.s.LiveSave.Rom.cDirtyPages = 0;
2090 pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
2091 pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
2092 pVM->pgm.s.LiveSave.Ram.cReadyPages = 0;
2093 pVM->pgm.s.LiveSave.Ram.cDirtyPages = 0;
2094 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
2095 pVM->pgm.s.LiveSave.fActive = true;
2096 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
2097 pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
2098 pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
2099 pVM->pgm.s.LiveSave.cSavedPages = 0;
2100 pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
2101 pVM->pgm.s.LiveSave.cPagesPerSecond = 8192;
2102
2103 /*
2104 * Per page type.
2105 */
2106 int rc = pgmR3PrepRomPages(pVM);
2107 if (RT_SUCCESS(rc))
2108 rc = pgmR3PrepMmio2Pages(pVM);
2109 if (RT_SUCCESS(rc))
2110 rc = pgmR3PrepRamPages(pVM);
2111
2112 NOREF(pSSM);
2113 return rc;
2114}
2115
2116
2117/**
2118 * @callback_method_impl{FNSSMINTSAVEEXEC}
2119 */
2120static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
2121{
2122 PPGM pPGM = &pVM->pgm.s;
2123
2124 /*
2125 * Lock PGM and set the no-more-writes indicator.
2126 */
2127 PGM_LOCK_VOID(pVM);
2128 pVM->pgm.s.fNoMorePhysWrites = true;
2129
2130 /*
2131 * Save basic data (required / unaffected by relocation).
2132 */
2133 int rc = SSMR3PutStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
2134
2135 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
2136 rc = SSMR3PutStruct(pSSM, &pVM->apCpusR3[idCpu]->pgm.s, &s_aPGMCpuFields[0]);
2137
2138 /*
2139 * Save the (remainder of the) memory.
2140 */
2141 if (RT_SUCCESS(rc))
2142 {
2143 if (pVM->pgm.s.LiveSave.fActive)
2144 {
2145 pgmR3ScanRomPages(pVM);
2146 pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
2147 pgmR3ScanRamPages(pVM, true /*fFinalPass*/);
2148
2149 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
2150 if (RT_SUCCESS(rc))
2151 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2152 if (RT_SUCCESS(rc))
2153 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
2154 }
2155 else
2156 {
2157 rc = pgmR3SaveRamConfig(pVM, pSSM);
2158 if (RT_SUCCESS(rc))
2159 rc = pgmR3SaveRomRanges(pVM, pSSM);
2160 if (RT_SUCCESS(rc))
2161 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
2162 if (RT_SUCCESS(rc))
2163 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
2164 if (RT_SUCCESS(rc))
2165 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
2166 if (RT_SUCCESS(rc))
2167 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2168 if (RT_SUCCESS(rc))
2169 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
2170 }
2171 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes of it.) */
2172 }
2173
2174 PGM_UNLOCK(pVM);
2175 return rc;
2176}
2177
2178
2179/**
2180 * @callback_method_impl{FNSSMINTSAVEDONE}
2181 */
2182static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
2183{
2184 /*
2185 * Do per page type cleanups first.
2186 */
2187 if (pVM->pgm.s.LiveSave.fActive)
2188 {
2189 pgmR3DoneRomPages(pVM);
2190 pgmR3DoneMmio2Pages(pVM);
2191 pgmR3DoneRamPages(pVM);
2192 }
2193
2194 /*
2195 * Clear the live save indicator and disengage write monitoring.
2196 */
2197 PGM_LOCK_VOID(pVM);
2198 pVM->pgm.s.LiveSave.fActive = false;
2199 /** @todo this is blindly assuming that we're the only user of write
2200 * monitoring. Fix this when more users are added. */
2201 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
2202 PGM_UNLOCK(pVM);
2203
2204 NOREF(pSSM);
2205 return VINF_SUCCESS;
2206}
2207
2208
2209/**
2210 * @callback_method_impl{FNSSMINTLOADPREP}
2211 */
2212static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
2213{
2214 /*
2215 * Call the reset function to make sure all the memory is cleared.
2216 */
2217 PGMR3Reset(pVM);
2218 pVM->pgm.s.LiveSave.fActive = false;
2219 NOREF(pSSM);
2220 return VINF_SUCCESS;
2221}
2222
2223
2224/**
2225 * Load an ignored page.
2226 *
2227 * @returns VBox status code.
2228 * @param pSSM The saved state handle.
2229 */
2230static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
2231{
2232 uint8_t abPage[GUEST_PAGE_SIZE];
2233 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
2234}
2235
2236
2237/**
2238 * Compares a page with an old save type value.
2239 *
2240 * @returns true if equal, false if not.
2241 * @param pPage The page to compare.
2242 * @param uOldType The old type value from the saved state.
2243 */
2244DECLINLINE(bool) pgmR3CompareNewAndOldPageTypes(PPGMPAGE pPage, uint8_t uOldType)
2245{
2246 uint8_t uOldPageType;
2247 switch (PGM_PAGE_GET_TYPE(pPage))
2248 {
2249 case PGMPAGETYPE_INVALID: uOldPageType = PGMPAGETYPE_OLD_INVALID; break;
2250 case PGMPAGETYPE_RAM: uOldPageType = PGMPAGETYPE_OLD_RAM; break;
2251 case PGMPAGETYPE_MMIO2: uOldPageType = PGMPAGETYPE_OLD_MMIO2; break;
2252 case PGMPAGETYPE_MMIO2_ALIAS_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO2_ALIAS_MMIO; break;
2253 case PGMPAGETYPE_ROM_SHADOW: uOldPageType = PGMPAGETYPE_OLD_ROM_SHADOW; break;
2254 case PGMPAGETYPE_ROM: uOldPageType = PGMPAGETYPE_OLD_ROM; break;
2255 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: RT_FALL_THRU();
2256 case PGMPAGETYPE_MMIO: uOldPageType = PGMPAGETYPE_OLD_MMIO; break;
2257 default:
2258 AssertFailed();
2259 uOldPageType = PGMPAGETYPE_OLD_INVALID;
2260 break;
2261 }
2262 return uOldPageType == uOldType;
2263}
2264
2265
2266/**
2267 * Loads a page without any bits in the saved state, i.e. making sure it's
2268 * really zero.
2269 *
2270 * @returns VBox status code.
2271 * @param pVM The cross context VM structure.
2272 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2273 * state).
2274 * @param pPage The guest page tracking structure.
2275 * @param GCPhys The page address.
2276 * @param pRam The ram range (logging).
2277 */
2278static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2279{
2280 if ( uOldType != PGMPAGETYPE_OLD_INVALID
2281 && !pgmR3CompareNewAndOldPageTypes(pPage, uOldType))
2282 return VERR_SSM_UNEXPECTED_DATA;
2283
2284 /* I think this should be sufficient. */
2285 if ( !PGM_PAGE_IS_ZERO(pPage)
2286 && !PGM_PAGE_IS_BALLOONED(pPage))
2287 return VERR_SSM_UNEXPECTED_DATA;
2288
2289 NOREF(pVM);
2290 NOREF(GCPhys);
2291 NOREF(pRam);
2292 return VINF_SUCCESS;
2293}
2294
2295
2296/**
2297 * Loads a page from the saved state.
2298 *
2299 * @returns VBox status code.
2300 * @param pVM The cross context VM structure.
2301 * @param pSSM The SSM handle.
2302 * @param uOldType The page type or PGMPAGETYPE_OLD_INVALID (old saved
2303 * state).
2304 * @param pPage The guest page tracking structure.
2305 * @param GCPhys The page address.
2306 * @param pRam The ram range (logging).
2307 */
2308static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2309{
2310 /*
2311 * Match up the type, dealing with MMIO2 aliases (dropped).
2312 */
2313 AssertLogRelMsgReturn( uOldType == PGMPAGETYPE_INVALID
2314 || pgmR3CompareNewAndOldPageTypes(pPage, uOldType)
2315 /* kudge for the expanded PXE bios (r67885) - @bugref{5687}: */
2316 || ( uOldType == PGMPAGETYPE_OLD_RAM
2317 && GCPhys >= 0xed000
2318 && GCPhys <= 0xeffff
2319 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM)
2320 ,
2321 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
2322 VERR_SSM_UNEXPECTED_DATA);
2323
2324 /*
2325 * Load the page.
2326 */
2327 PGMPAGEMAPLOCK PgMpLck;
2328 void *pvPage;
2329 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage, &PgMpLck);
2330 if (RT_SUCCESS(rc))
2331 {
2332 rc = SSMR3GetMem(pSSM, pvPage, GUEST_PAGE_SIZE);
2333 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2334 }
2335
2336 return rc;
2337}
2338
2339
2340/**
2341 * Loads a page (counter part to pgmR3SavePage).
2342 *
2343 * @returns VBox status code, fully bitched errors.
2344 * @param pVM The cross context VM structure.
2345 * @param pSSM The SSM handle.
2346 * @param uOldType The page type.
2347 * @param pPage The page.
2348 * @param GCPhys The page address.
2349 * @param pRam The RAM range (for error messages).
2350 */
2351static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uOldType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2352{
2353 uint8_t uState;
2354 int rc = SSMR3GetU8(pSSM, &uState);
2355 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
2356 if (uState == 0 /* zero */)
2357 rc = pgmR3LoadPageZeroOld(pVM, uOldType, pPage, GCPhys, pRam);
2358 else if (uState == 1)
2359 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uOldType, pPage, GCPhys, pRam);
2360 else
2361 rc = VERR_PGM_INVALID_SAVED_PAGE_STATE;
2362 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uOldType=%d GCPhys=%RGp %s rc=%Rrc\n",
2363 pPage, uState, uOldType, GCPhys, pRam->pszDesc, rc),
2364 rc);
2365 return VINF_SUCCESS;
2366}
2367
2368
2369/**
2370 * Loads a shadowed ROM page.
2371 *
2372 * @returns VBox status code, errors are fully bitched.
2373 * @param pVM The cross context VM structure.
2374 * @param pSSM The saved state handle.
2375 * @param pPage The page.
2376 * @param GCPhys The page address.
2377 * @param pRam The RAM range (for error messages).
2378 */
2379static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
2380{
2381 /*
2382 * Load and set the protection first, then load the two pages, the first
2383 * one is the active the other is the passive.
2384 */
2385 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
2386 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2387
2388 uint8_t uProt;
2389 int rc = SSMR3GetU8(pSSM, &uProt);
2390 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
2391 PGMROMPROT enmProt = (PGMROMPROT)uProt;
2392 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
2393 && enmProt < PGMROMPROT_END,
2394 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
2395 VERR_SSM_UNEXPECTED_DATA);
2396
2397 if (pRomPage->enmProt != enmProt)
2398 {
2399 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2400 AssertLogRelRCReturn(rc, rc);
2401 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2402 }
2403
2404 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
2405 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
2406 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
2407 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
2408
2409 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
2410 * used down the line (will the 2nd page will be written to the first
2411 * one because of a false TLB hit since the TLB is using GCPhys and
2412 * doesn't check the HCPhys of the desired page). */
2413 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
2414 if (RT_SUCCESS(rc))
2415 {
2416 *pPageActive = *pPage;
2417 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
2418 }
2419 return rc;
2420}
2421
2422
2423/**
2424 * Ram range flags and bits for older versions of the saved state.
2425 *
2426 * @returns VBox status code.
2427 *
2428 * @param pVM The cross context VM structure.
2429 * @param pSSM The SSM handle.
2430 * @param uVersion The saved state version.
2431 */
2432static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2433{
2434 /*
2435 * Ram range flags and bits.
2436 */
2437 uint32_t iSeqNo = 0;
2438 uint32_t const cRamRangeLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries,
2439 RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
2440 for (uint32_t idxLookup = 0; idxLookup < cRamRangeLookupEntries; idxLookup++)
2441 {
2442 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
2443 AssertContinue(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges));
2444 PPGMRAMRANGE const pRam = pVM->pgm.s.apRamRanges[idRamRange];
2445 AssertContinue(pRam);
2446
2447 /* Check the sequence number / separator. */
2448 uint32_t u32Sep;
2449 int rc = SSMR3GetU32(pSSM, &u32Sep);
2450 if (RT_FAILURE(rc))
2451 return rc;
2452 if (u32Sep == ~0U)
2453 break;
2454 if (u32Sep != iSeqNo)
2455 {
2456 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2457 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2458 }
2459 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2460
2461 /* Get the range details. */
2462 RTGCPHYS GCPhys;
2463 SSMR3GetGCPhys(pSSM, &GCPhys);
2464 RTGCPHYS GCPhysLast;
2465 SSMR3GetGCPhys(pSSM, &GCPhysLast);
2466 RTGCPHYS cb;
2467 SSMR3GetGCPhys(pSSM, &cb);
2468 uint8_t fHaveBits;
2469 rc = SSMR3GetU8(pSSM, &fHaveBits);
2470 if (RT_FAILURE(rc))
2471 return rc;
2472 if (fHaveBits & ~1)
2473 {
2474 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
2475 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2476 }
2477 size_t cchDesc = 0;
2478 char szDesc[256];
2479 szDesc[0] = '\0';
2480 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2481 {
2482 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2483 if (RT_FAILURE(rc))
2484 return rc;
2485 /* Since we've modified the description strings in r45878, only compare
2486 them if the saved state is more recent. */
2487 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
2488 cchDesc = strlen(szDesc);
2489 }
2490
2491 /*
2492 * Match it up with the current range.
2493 *
2494 * Note there is a hack for dealing with the high BIOS mapping
2495 * in the old saved state format, this means we might not have
2496 * a 1:1 match on success.
2497 */
2498 if ( ( GCPhys != pRam->GCPhys
2499 || GCPhysLast != pRam->GCPhysLast
2500 || cb != pRam->cb
2501 || ( cchDesc
2502 && strcmp(szDesc, pRam->pszDesc)) )
2503 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
2504 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
2505 || GCPhys != UINT32_C(0xfff80000)
2506 || GCPhysLast != UINT32_C(0xffffffff)
2507 || pRam->GCPhysLast != GCPhysLast
2508 || pRam->GCPhys < GCPhys
2509 || !fHaveBits)
2510 )
2511 {
2512 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
2513 "State : %RGp-%RGp %RGp bytes %s %s\n",
2514 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc,
2515 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
2516 /*
2517 * If we're loading a state for debugging purpose, don't make a fuss if
2518 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
2519 */
2520 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
2521 || GCPhys < 8 * _1M)
2522 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2523 N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
2524 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
2525 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pbR3 ? "bits" : "nobits", pRam->pszDesc);
2526
2527 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
2528 iSeqNo++;
2529 continue;
2530 }
2531
2532 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> GUEST_PAGE_SHIFT;
2533 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2534 {
2535 /*
2536 * Load the pages one by one.
2537 */
2538 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2539 {
2540 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2541 PPGMPAGE pPage = &pRam->aPages[iPage];
2542 uint8_t uOldType;
2543 rc = SSMR3GetU8(pSSM, &uOldType);
2544 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
2545 if (uOldType == PGMPAGETYPE_OLD_ROM_SHADOW)
2546 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
2547 else
2548 rc = pgmR3LoadPageOld(pVM, pSSM, uOldType, pPage, GCPhysPage, pRam);
2549 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2550 }
2551 }
2552 else
2553 {
2554 /*
2555 * Old format.
2556 */
2557
2558 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
2559 The rest is generally irrelevant and wrong since the stuff have to match registrations. */
2560 uint32_t fFlags = 0;
2561 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2562 {
2563 uint16_t u16Flags;
2564 rc = SSMR3GetU16(pSSM, &u16Flags);
2565 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2566 fFlags |= u16Flags;
2567 }
2568
2569 /* Load the bits */
2570 if ( !fHaveBits
2571 && GCPhysLast < UINT32_C(0xe0000000))
2572 {
2573 /*
2574 * Dynamic chunks.
2575 */
2576 const uint32_t cPagesInChunk = (1*1024*1024) >> GUEST_PAGE_SHIFT;
2577 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
2578 ("cPages=%#x cPagesInChunk=%#x GCPhys=%RGp %s\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
2579 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2580
2581 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
2582 {
2583 uint8_t fPresent;
2584 rc = SSMR3GetU8(pSSM, &fPresent);
2585 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2586 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
2587 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
2588 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2589
2590 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
2591 {
2592 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2593 PPGMPAGE pPage = &pRam->aPages[iPage];
2594 if (fPresent)
2595 {
2596 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO
2597 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
2598 rc = pgmR3LoadPageToDevNullOld(pSSM);
2599 else
2600 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2601 }
2602 else
2603 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
2604 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
2605 }
2606 }
2607 }
2608 else if (pRam->pbR3)
2609 {
2610 /*
2611 * MMIO2.
2612 */
2613 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
2614 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2615 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2616 AssertLogRelMsgReturn(pRam->pbR3,
2617 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
2618 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2619
2620 rc = SSMR3GetMem(pSSM, pRam->pbR3, pRam->cb);
2621 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
2622 }
2623 else if (GCPhysLast < UINT32_C(0xfff80000))
2624 {
2625 /*
2626 * PCI MMIO, no pages saved.
2627 */
2628 }
2629 else
2630 {
2631 /*
2632 * Load the 0xfff80000..0xffffffff BIOS range.
2633 * It starts with X reserved pages that we have to skip over since
2634 * the RAMRANGE create by the new code won't include those.
2635 */
2636 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
2637 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
2638 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
2639 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2640 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
2641 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
2642 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2643
2644 /* Skip wasted reserved pages before the ROM. */
2645 while (GCPhys < pRam->GCPhys)
2646 {
2647 rc = pgmR3LoadPageToDevNullOld(pSSM);
2648 AssertLogRelRCReturn(rc, rc);
2649 GCPhys += GUEST_PAGE_SIZE;
2650 }
2651
2652 /* Load the bios pages. */
2653 cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2654 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2655 {
2656 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT) + pRam->GCPhys;
2657 PPGMPAGE pPage = &pRam->aPages[iPage];
2658
2659 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
2660 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
2661 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2662 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
2663 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
2664 }
2665 }
2666 }
2667
2668 iSeqNo++;
2669 }
2670
2671 return VINF_SUCCESS;
2672}
2673
2674
2675/**
2676 * Worker for pgmR3Load and pgmR3LoadLocked.
2677 *
2678 * @returns VBox status code.
2679 *
2680 * @param pVM The cross context VM structure.
2681 * @param pSSM The SSM handle.
2682 * @param uVersion The PGM saved state unit version.
2683 * @param uPass The pass number.
2684 *
2685 * @todo This needs splitting up if more record types or code twists are
2686 * added...
2687 */
2688static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2689{
2690 NOREF(uPass);
2691
2692 /*
2693 * Process page records until we hit the terminator.
2694 */
2695 RTGCPHYS GCPhys = NIL_RTGCPHYS;
2696 PPGMRAMRANGE pRamHint = NULL;
2697 uint8_t id = UINT8_MAX;
2698 uint32_t iPage = UINT32_MAX - 10;
2699 PPGMROMRANGE pRom = NULL;
2700 PPGMREGMMIO2RANGE pRegMmio2 = NULL;
2701 PPGMRAMRANGE pMmio2RamRange = NULL;
2702
2703 /*
2704 * We batch up pages that should be freed instead of calling GMM for
2705 * each and every one of them. Note that we'll lose the pages in most
2706 * failure paths - this should probably be addressed one day.
2707 */
2708 uint32_t cPendingPages = 0;
2709 PGMMFREEPAGESREQ pReq;
2710 int rc = GMMR3FreePagesPrepare(pVM, &pReq, 128 /* batch size */, GMMACCOUNT_BASE);
2711 AssertLogRelRCReturn(rc, rc);
2712
2713 for (;;)
2714 {
2715 /*
2716 * Get the record type and flags.
2717 */
2718 uint8_t u8;
2719 rc = SSMR3GetU8(pSSM, &u8);
2720 if (RT_FAILURE(rc))
2721 return rc;
2722 if (u8 == PGM_STATE_REC_END)
2723 {
2724 /*
2725 * Finish off any pages pending freeing.
2726 */
2727 if (cPendingPages)
2728 {
2729 Log(("pgmR3LoadMemory: GMMR3FreePagesPerform pVM=%p cPendingPages=%u\n", pVM, cPendingPages));
2730 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2731 AssertLogRelRCReturn(rc, rc);
2732 }
2733 GMMR3FreePagesCleanup(pReq);
2734 return VINF_SUCCESS;
2735 }
2736 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2737 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2738 {
2739 /*
2740 * RAM page.
2741 */
2742 case PGM_STATE_REC_RAM_ZERO:
2743 case PGM_STATE_REC_RAM_RAW:
2744 case PGM_STATE_REC_RAM_BALLOONED:
2745 {
2746 /*
2747 * Get the address and resolve it into a page descriptor.
2748 */
2749 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2750 GCPhys += GUEST_PAGE_SIZE;
2751 else
2752 {
2753 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
2754 if (RT_FAILURE(rc))
2755 return rc;
2756 }
2757 AssertLogRelMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
2758
2759 PPGMPAGE pPage;
2760 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
2761 if (RT_SUCCESS(rc))
2762 { /* likely */ }
2763 else if ( rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
2764 && GCPhys < _1M
2765 && GCPhys >= 640U*_1K
2766 && (u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_RAM_ZERO)
2767 {
2768 rc = VINF_SUCCESS; /* We've kicked out unused pages between 640K and 1MB, but older states may include them. */
2769 id = UINT8_MAX;
2770 break;
2771 }
2772 else
2773 AssertLogRelMsgFailedReturn(("rc=%Rrc %RGp u8=%#x\n", rc, GCPhys, u8), rc);
2774
2775 /*
2776 * Take action according to the record type.
2777 */
2778 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2779 {
2780 case PGM_STATE_REC_RAM_ZERO:
2781 {
2782 if (PGM_PAGE_IS_ZERO(pPage))
2783 break;
2784
2785 /* Ballooned pages must be unmarked (live snapshot and
2786 teleportation scenarios). */
2787 if (PGM_PAGE_IS_BALLOONED(pPage))
2788 {
2789 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2790 if (uVersion == PGM_SAVED_STATE_VERSION_BALLOON_BROKEN)
2791 break;
2792 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2793 break;
2794 }
2795
2796 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
2797
2798 /* If this is a ROM page, we must clear it and not try to
2799 * free it. Ditto if the VM is using RamPreAlloc (see
2800 * @bugref{6318}). */
2801 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM
2802 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM_SHADOW
2803 || PGM_IS_IN_NEM_MODE(pVM)
2804 || pVM->pgm.s.fRamPreAlloc)
2805 {
2806 PGMPAGEMAPLOCK PgMpLck;
2807 void *pvDstPage;
2808 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2809 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2810
2811 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2812 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2813 }
2814 /* Free it only if it's not part of a previously
2815 allocated large page (no need to clear the page). */
2816 else if ( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2817 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2818 {
2819 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2820 AssertRCReturn(rc, rc);
2821 }
2822 /** @todo handle large pages (see @bugref{5545}) */
2823 break;
2824 }
2825
2826 case PGM_STATE_REC_RAM_BALLOONED:
2827 {
2828 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
2829 if (PGM_PAGE_IS_BALLOONED(pPage))
2830 break;
2831
2832 /* We don't map ballooned pages in our shadow page tables, let's
2833 just free it if allocated and mark as ballooned. See @bugref{5515}. */
2834 if (PGM_PAGE_IS_ALLOCATED(pPage))
2835 {
2836 /** @todo handle large pages + ballooning when it works. (see @bugref{5515},
2837 * @bugref{5545}). */
2838 AssertLogRelMsgReturn( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2839 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED,
2840 ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_PGM_LOAD_UNEXPECTED_PAGE_TYPE);
2841
2842 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
2843 AssertRCReturn(rc, rc);
2844 }
2845 Assert(PGM_PAGE_IS_ZERO(pPage));
2846 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
2847 break;
2848 }
2849
2850 case PGM_STATE_REC_RAM_RAW:
2851 {
2852 PGMPAGEMAPLOCK PgMpLck;
2853 void *pvDstPage;
2854 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage, &PgMpLck);
2855 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2856 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2857 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2858 if (RT_FAILURE(rc))
2859 return rc;
2860 break;
2861 }
2862
2863 default:
2864 AssertMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
2865 }
2866 id = UINT8_MAX;
2867 break;
2868 }
2869
2870 /*
2871 * MMIO2 page.
2872 */
2873 case PGM_STATE_REC_MMIO2_RAW:
2874 case PGM_STATE_REC_MMIO2_ZERO:
2875 {
2876 /*
2877 * Get the ID + page number and resolved that into a MMIO2 page.
2878 */
2879 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2880 iPage++;
2881 else
2882 {
2883 SSMR3GetU8(pSSM, &id);
2884 rc = SSMR3GetU32(pSSM, &iPage);
2885 if (RT_FAILURE(rc))
2886 return rc;
2887 }
2888 if ( !pRegMmio2
2889 || pRegMmio2->idSavedState != id)
2890 {
2891 pMmio2RamRange = NULL;
2892 uint32_t const cMmio2Ranges = RT_MIN(pVM->pgm.s.cMmio2Ranges, RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges));
2893 for (uint32_t idx = 0; idx < cMmio2Ranges; idx++)
2894 if (pVM->pgm.s.aMmio2Ranges[idx].idSavedState == id)
2895 {
2896 pRegMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
2897 pMmio2RamRange = pVM->pgm.s.apMmio2RamRanges[idx];
2898 break;
2899 }
2900 AssertLogRelMsgReturn(pRegMmio2 && pMmio2RamRange, ("id=%#u iPage=%#x\n", id, iPage),
2901 VERR_PGM_SAVED_MMIO2_RANGE_NOT_FOUND);
2902 }
2903 AssertLogRelMsgReturn(iPage < (pMmio2RamRange->cb >> GUEST_PAGE_SHIFT),
2904 ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2RamRange->cb, pMmio2RamRange->pszDesc),
2905 VERR_PGM_SAVED_MMIO2_PAGE_NOT_FOUND);
2906 void * const pvDstPage = &pMmio2RamRange->pbR3[(size_t)iPage << GUEST_PAGE_SHIFT];
2907
2908 /*
2909 * Load the page bits.
2910 */
2911 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2912 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
2913 else
2914 {
2915 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
2916 if (RT_FAILURE(rc))
2917 return rc;
2918 }
2919 GCPhys = NIL_RTGCPHYS;
2920 break;
2921 }
2922
2923 /*
2924 * ROM pages.
2925 */
2926 case PGM_STATE_REC_ROM_VIRGIN:
2927 case PGM_STATE_REC_ROM_SHW_RAW:
2928 case PGM_STATE_REC_ROM_SHW_ZERO:
2929 case PGM_STATE_REC_ROM_PROT:
2930 {
2931 /*
2932 * Get the ID + page number and resolved that into a ROM page descriptor.
2933 */
2934 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2935 iPage++;
2936 else
2937 {
2938 SSMR3GetU8(pSSM, &id);
2939 rc = SSMR3GetU32(pSSM, &iPage);
2940 if (RT_FAILURE(rc))
2941 return rc;
2942 }
2943 if ( !pRom
2944 || pRom->idSavedState != id)
2945 {
2946 uint32_t const cRomRanges = RT_MIN(pVM->pgm.s.cRomRanges, RT_ELEMENTS(pVM->pgm.s.apRomRanges));
2947 uint32_t idx;
2948 for (idx = 0; idx < cRomRanges; idx++)
2949 {
2950 pRom = pVM->pgm.s.apRomRanges[idx];
2951 if (pRom->idSavedState == id)
2952 break;
2953 }
2954 AssertLogRelMsgReturn(idx < cRomRanges, ("id=%#u iPage=%#x\n", id, iPage), VERR_PGM_SAVED_ROM_RANGE_NOT_FOUND);
2955 }
2956 AssertLogRelMsgReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT),
2957 ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc),
2958 VERR_PGM_SAVED_ROM_PAGE_NOT_FOUND);
2959 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2960 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT);
2961
2962 /*
2963 * Get and set the protection.
2964 */
2965 uint8_t u8Prot;
2966 rc = SSMR3GetU8(pSSM, &u8Prot);
2967 if (RT_FAILURE(rc))
2968 return rc;
2969 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2970 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_PGM_SAVED_ROM_PAGE_PROT);
2971
2972 if (enmProt != pRomPage->enmProt)
2973 {
2974 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
2975 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
2976 N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
2977 GCPhys, enmProt, pRom->pszDesc);
2978 rc = PGMR3PhysRomProtect(pVM, GCPhys, GUEST_PAGE_SIZE, enmProt);
2979 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2980 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_PGM_SAVED_ROM_PAGE_PROT);
2981 }
2982 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2983 break; /* done */
2984
2985 /*
2986 * Get the right page descriptor.
2987 */
2988 PPGMPAGE pRealPage;
2989 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2990 {
2991 case PGM_STATE_REC_ROM_VIRGIN:
2992 if (!PGMROMPROT_IS_ROM(enmProt))
2993 pRealPage = &pRomPage->Virgin;
2994 else
2995 pRealPage = NULL;
2996 break;
2997
2998 case PGM_STATE_REC_ROM_SHW_RAW:
2999 case PGM_STATE_REC_ROM_SHW_ZERO:
3000 if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
3001 return SSMR3SetCfgError(pSSM, RT_SRC_POS,
3002 N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
3003 GCPhys, enmProt, pRom->pszDesc);
3004 if (PGMROMPROT_IS_ROM(enmProt))
3005 pRealPage = &pRomPage->Shadow;
3006 else
3007 pRealPage = NULL;
3008 break;
3009
3010 default: AssertLogRelFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE); /* shut up gcc */
3011 }
3012#ifdef VBOX_WITH_PGM_NEM_MODE
3013 bool const fAltPage = pRealPage != NULL;
3014#endif
3015 if (!pRealPage)
3016 {
3017 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint);
3018 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
3019 }
3020
3021 /*
3022 * Make it writable and map it (if necessary).
3023 */
3024 void *pvDstPage = NULL;
3025 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3026 {
3027 case PGM_STATE_REC_ROM_SHW_ZERO:
3028 if ( PGM_PAGE_IS_ZERO(pRealPage)
3029 || PGM_PAGE_IS_BALLOONED(pRealPage))
3030 break;
3031 /** @todo implement zero page replacing. */
3032 RT_FALL_THRU();
3033 case PGM_STATE_REC_ROM_VIRGIN:
3034 case PGM_STATE_REC_ROM_SHW_RAW:
3035#ifdef VBOX_WITH_PGM_NEM_MODE
3036 if (fAltPage && PGM_IS_IN_NEM_MODE(pVM))
3037 pvDstPage = &pRom->pbR3Alternate[iPage << GUEST_PAGE_SHIFT];
3038 else
3039#endif
3040 {
3041 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
3042 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
3043 }
3044 break;
3045 }
3046
3047 /*
3048 * Load the bits.
3049 */
3050 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
3051 {
3052 case PGM_STATE_REC_ROM_SHW_ZERO:
3053 if (pvDstPage)
3054 RT_BZERO(pvDstPage, GUEST_PAGE_SIZE);
3055 break;
3056
3057 case PGM_STATE_REC_ROM_VIRGIN:
3058 case PGM_STATE_REC_ROM_SHW_RAW:
3059 rc = SSMR3GetMem(pSSM, pvDstPage, GUEST_PAGE_SIZE);
3060 if (RT_FAILURE(rc))
3061 return rc;
3062 break;
3063 }
3064 GCPhys = NIL_RTGCPHYS;
3065 break;
3066 }
3067
3068 /*
3069 * Unknown type.
3070 */
3071 default:
3072 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_PGM_SAVED_REC_TYPE);
3073 }
3074 } /* forever */
3075}
3076
3077
3078/**
3079 * Worker for pgmR3Load.
3080 *
3081 * @returns VBox status code.
3082 *
3083 * @param pVM The cross context VM structure.
3084 * @param pSSM The SSM handle.
3085 * @param uVersion The saved state version.
3086 */
3087static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
3088{
3089 PPGM pPGM = &pVM->pgm.s;
3090 int rc;
3091 uint32_t u32Sep;
3092
3093 /*
3094 * Load basic data (required / unaffected by relocation).
3095 */
3096 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
3097 {
3098 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
3099 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFields[0], NULL /*pvUser*/);
3100 else
3101 rc = SSMR3GetStructEx(pSSM, pPGM, sizeof(*pPGM), 0 /*fFlags*/, &s_aPGMFieldsPreBalloon[0], NULL /*pvUser*/);
3102
3103 AssertLogRelRCReturn(rc, rc);
3104
3105 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3106 {
3107 if (uVersion > PGM_SAVED_STATE_VERSION_PRE_PAE)
3108 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFields[0]);
3109 else
3110 rc = SSMR3GetStruct(pSSM, &pVM->apCpusR3[i]->pgm.s, &s_aPGMCpuFieldsPrePae[0]);
3111 AssertLogRelRCReturn(rc, rc);
3112 }
3113 }
3114 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
3115 {
3116 AssertRelease(pVM->cCpus == 1);
3117
3118 PGMOLD pgmOld;
3119 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
3120 AssertLogRelRCReturn(rc, rc);
3121
3122 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3123 pVCpu0->pgm.s.fA20Enabled = pgmOld.fA20Enabled;
3124 pVCpu0->pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
3125 pVCpu0->pgm.s.enmGuestMode = pgmOld.enmGuestMode;
3126 }
3127 else
3128 {
3129 AssertRelease(pVM->cCpus == 1);
3130
3131 SSMR3Skip(pSSM, sizeof(bool));
3132 RTGCPTR GCPtrIgn;
3133 SSMR3GetGCPtr(pSSM, &GCPtrIgn);
3134 SSMR3Skip(pSSM, sizeof(uint32_t));
3135
3136 uint32_t cbRamSizeIgnored;
3137 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
3138 if (RT_FAILURE(rc))
3139 return rc;
3140 PVMCPU pVCpu0 = pVM->apCpusR3[0];
3141 SSMR3GetGCPhys(pSSM, &pVCpu0->pgm.s.GCPhysA20Mask);
3142
3143 uint32_t u32 = 0;
3144 SSMR3GetUInt(pSSM, &u32);
3145 pVCpu0->pgm.s.fA20Enabled = !!u32;
3146 SSMR3GetUInt(pSSM, &pVCpu0->pgm.s.fSyncFlags);
3147 RTUINT uGuestMode;
3148 SSMR3GetUInt(pSSM, &uGuestMode);
3149 pVCpu0->pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
3150
3151 /* check separator. */
3152 SSMR3GetU32(pSSM, &u32Sep);
3153 if (RT_FAILURE(rc))
3154 return rc;
3155 if (u32Sep != (uint32_t)~0)
3156 {
3157 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
3158 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
3159 }
3160 }
3161
3162 /*
3163 * Fix the A20 mask.
3164 */
3165 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3166 {
3167 PVMCPU pVCpu = pVM->apCpusR3[i];
3168 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!pVCpu->pgm.s.fA20Enabled << 20);
3169 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
3170 }
3171
3172 /*
3173 * The guest mappings - skipped now, see re-fixation in the caller.
3174 */
3175 if (uVersion <= PGM_SAVED_STATE_VERSION_PRE_PAE)
3176 {
3177 for (uint32_t i = 0; ; i++)
3178 {
3179 rc = SSMR3GetU32(pSSM, &u32Sep); /* sequence number */
3180 if (RT_FAILURE(rc))
3181 return rc;
3182 if (u32Sep == ~0U)
3183 break;
3184 AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
3185
3186 char szDesc[256];
3187 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
3188 if (RT_FAILURE(rc))
3189 return rc;
3190 RTGCPTR GCPtrIgnore;
3191 SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* GCPtr */
3192 rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs */
3193 if (RT_FAILURE(rc))
3194 return rc;
3195 }
3196 }
3197
3198 /*
3199 * Load the RAM contents.
3200 */
3201 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
3202 {
3203 if (!pVM->pgm.s.LiveSave.fActive)
3204 {
3205 if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
3206 {
3207 rc = pgmR3LoadRamConfig(pVM, pSSM);
3208 if (RT_FAILURE(rc))
3209 return rc;
3210 }
3211 rc = pgmR3LoadRomRanges(pVM, pSSM);
3212 if (RT_FAILURE(rc))
3213 return rc;
3214 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
3215 if (RT_FAILURE(rc))
3216 return rc;
3217 }
3218
3219 rc = pgmR3LoadMemory(pVM, pSSM, uVersion, SSM_PASS_FINAL);
3220 }
3221 else
3222 rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
3223
3224 /* Refresh balloon accounting. */
3225 if (pVM->pgm.s.cBalloonedPages)
3226 {
3227 Log(("pgmR3LoadFinalLocked: pVM=%p cBalloonedPages=%#x\n", pVM, pVM->pgm.s.cBalloonedPages));
3228 rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
3229 AssertRCReturn(rc, rc);
3230 }
3231 return rc;
3232}
3233
3234
/**
 * @callback_method_impl{FNSSMINTLOADEXEC}
 */
static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    int rc;

    /*
     * Validate version.
     *
     * Live-save passes (uPass != SSM_PASS_FINAL) only exist in the newer
     * formats, so the set of acceptable versions for them is narrower than
     * for the final pass.
     */
    if (    (   uPass != SSM_PASS_FINAL
             && uVersion != PGM_SAVED_STATE_VERSION
             && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
             && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
             && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
             && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
        ||  (   uVersion != PGM_SAVED_STATE_VERSION
             && uVersion != PGM_SAVED_STATE_VERSION_PRE_PAE
             && uVersion != PGM_SAVED_STATE_VERSION_BALLOON_BROKEN
             && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
             && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
             && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
             && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
             && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
             && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
       )
    {
        AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do the loading while owning the lock because a bunch of the functions
     * we're using requires this.
     */
    if (uPass != SSM_PASS_FINAL)
    {
        PGM_LOCK_VOID(pVM);
        if (uPass != 0)
            rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
        else
        {
            /* Pass 0 of a live restore: mark live-save active (so the final
               pass knows the range configs were already consumed) and read
               the RAM/ROM/MMIO2 range configuration before the page data. */
            pVM->pgm.s.LiveSave.fActive = true;
            if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
                rc = pgmR3LoadRamConfig(pVM, pSSM);
            else
                rc = VINF_SUCCESS;
            if (RT_SUCCESS(rc))
                rc = pgmR3LoadRomRanges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3LoadMemory(pVM, pSSM, uVersion, uPass);
        }
        PGM_UNLOCK(pVM);
    }
    else
    {
        /* Final pass: load everything that is left, then bring the paging
           state back in sync with the restored CPU state. */
        PGM_LOCK_VOID(pVM);
        rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
        pVM->pgm.s.LiveSave.fActive = false;
        PGM_UNLOCK(pVM);
        if (RT_SUCCESS(rc))
        {
            /*
             * We require a full resync now.
             */
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
            {
                PVMCPU pVCpu = pVM->apCpusR3[i];
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
                /** @todo For guest PAE, we might get the wrong
                 *        aGCPhysGstPaePDs values now. We should used the
                 *        saved ones... Postponing this since it nothing new
                 *        and PAE/PDPTR needs some general readjusting, see
                 *        @bugref{5880}. */
            }

            pgmR3HandlerPhysicalUpdateAll(pVM);

            /*
             * Change the paging mode (indirectly restores PGMCPU::GCPhysCR3).
             * (Requires the CPUM state to be restored already!)
             */
            if (CPUMR3IsStateRestorePending(pVM))
                return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
                                         N_("PGM was unexpectedly restored before CPUM"));

            for (VMCPUID i = 0; i < pVM->cCpus; i++)
            {
                PVMCPU pVCpu = pVM->apCpusR3[i];

                rc = PGMHCChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode, false /* fForce */);
                AssertLogRelRCReturn(rc, rc);

#if !defined(VBOX_VMM_TARGET_ARMV8)
                /* Update the PSE, NX flags and validity masks. */
                pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
                PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
#endif
            }
        }
    }

    return rc;
}
3342
3343
3344/**
3345 * @callback_method_impl{FNSSMINTLOADDONE}
3346 */
3347static DECLCALLBACK(int) pgmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
3348{
3349 pVM->pgm.s.fRestoreRomPagesOnReset = true;
3350 NOREF(pSSM);
3351 return VINF_SUCCESS;
3352}
3353
3354
3355/**
3356 * Registers the saved state callbacks with SSM.
3357 *
3358 * @returns VBox status code.
3359 * @param pVM The cross context VM structure.
3360 * @param cbRam The RAM size.
3361 */
3362int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
3363{
3364 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
3365 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
3366 NULL, pgmR3SaveExec, pgmR3SaveDone,
3367 pgmR3LoadPrep, pgmR3Load, pgmR3LoadDone);
3368}
3369
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette