VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMSavedState.cpp@ 23495

Last change on this file since 23495 was 23489, checked in by vboxsync, 15 years ago

PGMSavedState: More write lock checks. Added missing PGM::cMonitoredPages update in the save-done routine.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 91.9 KB
1/* $Id: PGMSavedState.cpp 23489 2009-10-01 15:42:44Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/pgm.h>
28#include <VBox/stam.h>
29#include <VBox/ssm.h>
30#include <VBox/pdm.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33
34#include <VBox/param.h>
35#include <VBox/err.h>
36
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/mem.h>
40#include <iprt/string.h>
41#include <iprt/thread.h>
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Saved state data unit version. */
48#ifdef VBOX_WITH_LIVE_MIGRATION
49# define PGM_SAVED_STATE_VERSION 10
50#else
51# define PGM_SAVED_STATE_VERSION 9
52#endif
53/** Saved state data unit version for 3.0. (pre live migration) */
54#define PGM_SAVED_STATE_VERSION_3_0_0 9
55/** Saved state data unit version for 2.2.2 and later. */
56#define PGM_SAVED_STATE_VERSION_2_2_2 8
57/** Saved state data unit version for 2.2.0. */
58#define PGM_SAVED_STATE_VERSION_RR_DESC 7
59/** Saved state data unit version. */
60#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
61
62
63/** @name Sparse state record types
64 * @{ */
65/** Zero page. No data. */
66#define PGM_STATE_REC_RAM_ZERO UINT8_C(0x00)
67/** Raw page. */
68#define PGM_STATE_REC_RAM_RAW UINT8_C(0x01)
69/** Raw MMIO2 page. */
70#define PGM_STATE_REC_MMIO2_RAW UINT8_C(0x02)
71/** Zero MMIO2 page. */
72#define PGM_STATE_REC_MMIO2_ZERO UINT8_C(0x03)
73/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
74#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x04)
76/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
76#define PGM_STATE_REC_ROM_SHW_RAW UINT8_C(0x05)
77/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
78#define PGM_STATE_REC_ROM_SHW_ZERO UINT8_C(0x06)
79/** ROM protection (8-bit). */
80#define PGM_STATE_REC_ROM_PROT UINT8_C(0x07)
81/** The last record type. */
82#define PGM_STATE_REC_LAST PGM_STATE_REC_ROM_PROT
83/** End marker. */
84#define PGM_STATE_REC_END UINT8_C(0xff)
85/** Flag indicating that the data is preceded by the page address.
86 * For RAW pages this is an RTGCPHYS. For MMIO2 and ROM pages this is an 8-bit
87 * range ID and a 32-bit page index.
88 */
89#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
90/** @} */
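
/* A minimal decode sketch of the sparse record stream described above; this
 * is an illustration only and not used by the code in this file. It assumes
 * the same SSMR3Get* API the load code uses; pfnSkipPayload is a hypothetical
 * helper standing in for the per-type payload handling.
 *
 * @code
 *  uint8_t u8;
 *  int rc = SSMR3GetU8(pSSM, &u8);                 // record type byte, possibly with the addr flag
 *  while (RT_SUCCESS(rc) && u8 != PGM_STATE_REC_END)
 *  {
 *      uint8_t const uType = u8 & ~PGM_STATE_REC_FLAG_ADDR;
 *      if (u8 & PGM_STATE_REC_FLAG_ADDR)
 *      {
 *          if (   uType == PGM_STATE_REC_RAM_ZERO
 *              || uType == PGM_STATE_REC_RAM_RAW)
 *          {
 *              RTGCPHYS GCPhys;
 *              rc = SSMR3GetGCPhys(pSSM, &GCPhys); // RAM records carry a full address...
 *          }
 *          else
 *          {
 *              uint8_t idRange; uint32_t iPage;
 *              SSMR3GetU8(pSSM, &idRange);         // ...while MMIO2/ROM records carry a
 *              rc = SSMR3GetU32(pSSM, &iPage);     // range ID and a page index.
 *          }
 *      }
 *      if (RT_SUCCESS(rc))
 *          rc = pfnSkipPayload(pSSM, uType);       // hypothetical; the payload depends on the type.
 *      if (RT_SUCCESS(rc))
 *          rc = SSMR3GetU8(pSSM, &u8);             // next record.
 *  }
 * @endcode
 */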
91
92
93/*******************************************************************************
94* Structures and Typedefs *
95*******************************************************************************/
96/** For loading old saved states. (pre-smp) */
97typedef struct
98{
99 /** If set, no conflict checks are required. (boolean) */
100 bool fMappingsFixed;
101 /** Size of fixed mapping */
102 uint32_t cbMappingFixed;
103 /** Base address (GC) of fixed mapping */
104 RTGCPTR GCPtrMappingFixed;
105 /** A20 gate mask.
106 * Our current approach to A20 emulation is to let REM do it and not bother
107 * anywhere else. The interesting guests will be operating with it enabled anyway.
108 * But should the need arise, we'll subject physical addresses to this mask. */
109 RTGCPHYS GCPhysA20Mask;
110 /** A20 gate state - boolean! */
111 bool fA20Enabled;
112 /** The guest paging mode. */
113 PGMMODE enmGuestMode;
114} PGMOLD;
115
116
117/*******************************************************************************
118* Global Variables *
119*******************************************************************************/
120/** PGM fields to save/load. */
121static const SSMFIELD s_aPGMFields[] =
122{
123 SSMFIELD_ENTRY( PGM, fMappingsFixed),
124 SSMFIELD_ENTRY_GCPTR( PGM, GCPtrMappingFixed),
125 SSMFIELD_ENTRY( PGM, cbMappingFixed),
126 SSMFIELD_ENTRY_TERM()
127};
128
129static const SSMFIELD s_aPGMCpuFields[] =
130{
131 SSMFIELD_ENTRY( PGMCPU, fA20Enabled),
132 SSMFIELD_ENTRY_GCPHYS( PGMCPU, GCPhysA20Mask),
133 SSMFIELD_ENTRY( PGMCPU, enmGuestMode),
134 SSMFIELD_ENTRY_TERM()
135};
136
137static const SSMFIELD s_aPGMFields_Old[] =
138{
139 SSMFIELD_ENTRY( PGMOLD, fMappingsFixed),
140 SSMFIELD_ENTRY_GCPTR( PGMOLD, GCPtrMappingFixed),
141 SSMFIELD_ENTRY( PGMOLD, cbMappingFixed),
142 SSMFIELD_ENTRY( PGMOLD, fA20Enabled),
143 SSMFIELD_ENTRY_GCPHYS( PGMOLD, GCPhysA20Mask),
144 SSMFIELD_ENTRY( PGMOLD, enmGuestMode),
145 SSMFIELD_ENTRY_TERM()
146};
147
148
149/**
150 * Find the ROM tracking structure for the given page.
151 *
152 * @returns Pointer to the ROM page structure. NULL if the address is not
153 * a ROM page (i.e. the caller didn't check that it is one).
154 * @param pVM The VM handle.
155 * @param GCPhys The address of the ROM page.
156 */
157static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
158{
159 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
160 pRomRange;
161 pRomRange = pRomRange->CTX_SUFF(pNext))
162 {
163 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
164 if (off < pRomRange->cb)
165 return &pRomRange->aPages[off >> PAGE_SHIFT];
166 }
167 return NULL;
168}
169
170
171/**
172 * Prepare for a live save operation.
173 *
174 * This will attempt to allocate and initialize the tracking structures. It
175 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
176 * pgmR3SaveDone will do the cleanups.
177 *
178 * @returns VBox status code.
179 *
180 * @param pVM The VM handle.
181 * @param pSSM The SSM handle.
182 */
183static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
184{
185 /*
186 * Indicate that we will be using the write monitoring.
187 */
188 pgmLock(pVM);
189 /** @todo find a way of mediating this when more users are added. */
190 if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
191 {
192 pgmUnlock(pVM);
193 AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
194 }
195 pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
196 pgmUnlock(pVM);
197
198 /*
199 * Initialize the statistics.
200 */
201 pVM->pgm.s.LiveSave.cReadyPages = 0;
202 pVM->pgm.s.LiveSave.cDirtyPages = 0;
203 pVM->pgm.s.LiveSave.cIgnoredPages = 0;
204 pVM->pgm.s.LiveSave.cMmio2Pages = 0;
205 pVM->pgm.s.LiveSave.fActive = true;
206
207 /*
208 * Initialize the live save tracking in the MMIO2 ranges.
209 */
210 pgmLock(pVM);
211 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
212 {
213 uint32_t const cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
214
215#if 0 /** @todo MMIO2 dirty page tracking for live save. */
216 for (uint32_t iPage = 0; iPage < cPages; iPage++)
217 {
218 }
219#endif
220 pVM->pgm.s.LiveSave.cMmio2Pages += cPages;
221 }
222 pgmUnlock(pVM);
223
224 /*
225 * Initialize the live save tracking in the ROM page descriptors.
226 */
227 pgmLock(pVM);
228 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
229 {
230 PPGMRAMRANGE pRamHint = NULL;
231 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
232
233 for (uint32_t iPage = 0; iPage < cPages; iPage++)
234 {
235 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)PGMROMPROT_INVALID;
236 pRom->aPages[iPage].LiveSave.fWrittenTo = false;
237 pRom->aPages[iPage].LiveSave.fDirty = true;
238 pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
239 if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
240 {
241 if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
242 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow);
243 else
244 {
245 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
246 PPGMPAGE pPage;
247 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
248 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
249 if (RT_SUCCESS(rc))
250 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage);
251 else
252 pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow);
253 }
254 }
255 }
256
257 pVM->pgm.s.LiveSave.cDirtyPages += cPages;
258 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
259 pVM->pgm.s.LiveSave.cDirtyPages += cPages;
260 }
261 pgmUnlock(pVM);
262
263 /*
264 * Try allocating tracking structures for the ram ranges.
265 *
266 * To avoid lock contention, we leave the lock every time we're allocating
267 * a new array. This means we'll have to ditch the allocation and start
268 * all over again if the RAM range list changes in-between.
269 *
270 * Note! pgmR3SaveDone will always be called and it is therefore responsible
271 * for cleaning up.
272 */
273 PPGMRAMRANGE pCur;
274 pgmLock(pVM);
275 do
276 {
277 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
278 {
279 if ( !pCur->paLSPages
280 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
281 {
282 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
283 uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
284 pgmUnlock(pVM);
285 PPGMLIVESAVEPAGE paLSPages = (PPGMLIVESAVEPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVEPAGE));
286 if (!paLSPages)
287 return VERR_NO_MEMORY;
288 pgmLock(pVM);
289 if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
290 {
291 pgmUnlock(pVM);
292 MMR3HeapFree(paLSPages);
293 pgmLock(pVM);
294 break; /* try again */
295 }
296 pCur->paLSPages = paLSPages;
297
298 /*
299 * Initialize the array.
300 */
301 uint32_t iPage = cPages;
302 while (iPage-- > 0)
303 {
304 /** @todo yield critsect! (after moving this away from EMT0) */
305 PCPGMPAGE pPage = &pCur->aPages[iPage];
306 paLSPages[iPage].uPassSaved = UINT32_MAX;
307 paLSPages[iPage].cDirtied = 0;
308 paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
309 paLSPages[iPage].fWriteMonitored = 0;
310 paLSPages[iPage].fWriteMonitoredJustNow = 0;
311 paLSPages[iPage].u2Reserved = 0;
312 switch (PGM_PAGE_GET_TYPE(pPage))
313 {
314 case PGMPAGETYPE_RAM:
315 if (PGM_PAGE_IS_ZERO(pPage))
316 {
317 paLSPages[iPage].fZero = 1;
318 paLSPages[iPage].fShared = 0;
319 }
320 else if (PGM_PAGE_IS_SHARED(pPage))
321 {
322 paLSPages[iPage].fZero = 0;
323 paLSPages[iPage].fShared = 1;
324 }
325 else
326 {
327 paLSPages[iPage].fZero = 0;
328 paLSPages[iPage].fShared = 0;
329 }
330 paLSPages[iPage].fIgnore = 0;
331 pVM->pgm.s.LiveSave.cDirtyPages++;
332 break;
333
334 case PGMPAGETYPE_ROM_SHADOW:
335 case PGMPAGETYPE_ROM:
336 {
337 paLSPages[iPage].fZero = 0;
338 paLSPages[iPage].fShared = 0;
339 paLSPages[iPage].fDirty = 0;
340 paLSPages[iPage].fIgnore = 1;
341 pVM->pgm.s.LiveSave.cIgnoredPages++;
342 break;
343 }
344
345 default:
346 AssertMsgFailed(("%R[pgmpage]", pPage));
347 case PGMPAGETYPE_MMIO2:
348 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
349 paLSPages[iPage].fZero = 0;
350 paLSPages[iPage].fShared = 0;
351 paLSPages[iPage].fDirty = 0;
352 paLSPages[iPage].fIgnore = 1;
353 pVM->pgm.s.LiveSave.cIgnoredPages++;
354 break;
355
356 case PGMPAGETYPE_MMIO:
357 paLSPages[iPage].fZero = 0;
358 paLSPages[iPage].fShared = 0;
359 paLSPages[iPage].fDirty = 0;
360 paLSPages[iPage].fIgnore = 1;
361 pVM->pgm.s.LiveSave.cIgnoredPages++;
362 break;
363 }
364 }
365 }
366 }
367 } while (pCur);
368 pgmUnlock(pVM);
369
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Assigns IDs to the ROM ranges and saves them.
376 *
377 * @returns VBox status code.
378 * @param pVM The VM handle.
379 * @param pSSM Saved state handle.
380 */
381static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
382{
383 pgmLock(pVM);
384 uint8_t id = 1;
385 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
386 {
387 pRom->idSavedState = id;
388 SSMR3PutU8(pSSM, id);
389 SSMR3PutStrZ(pSSM, ""); /* device name */
390 SSMR3PutU32(pSSM, 0); /* device instance */
391 SSMR3PutU8(pSSM, 0); /* region */
392 SSMR3PutStrZ(pSSM, pRom->pszDesc);
393 SSMR3PutGCPhys(pSSM, pRom->GCPhys);
394 int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
395 if (RT_FAILURE(rc))
396 break;
397 }
398 pgmUnlock(pVM);
399 return SSMR3PutU8(pSSM, UINT8_MAX);
400}
401
402
403/**
404 * Loads the ROM range ID assignments.
405 *
406 * @returns VBox status code.
407 *
408 * @param pVM The VM handle.
409 * @param pSSM The saved state handle.
410 */
411static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
412{
413 Assert(PGMIsLockOwner(pVM));
414
415 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
416 pRom->idSavedState = UINT8_MAX;
417
418 for (;;)
419 {
420 /*
421 * Read the data.
422 */
423 uint8_t id;
424 int rc = SSMR3GetU8(pSSM, &id);
425 if (RT_FAILURE(rc))
426 return rc;
427 if (id == UINT8_MAX)
428 {
429 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
430 AssertLogRelMsg(pRom->idSavedState != UINT8_MAX, ("%s\n", pRom->pszDesc));
431 return VINF_SUCCESS; /* the end */
432 }
433 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
434
435 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szDeviceName)];
436 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
437 AssertLogRelRCReturn(rc, rc);
438
439 uint32_t uInstance;
440 SSMR3GetU32(pSSM, &uInstance);
441 uint8_t iRegion;
442 SSMR3GetU8(pSSM, &iRegion);
443
444 char szDesc[64];
445 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
446 AssertLogRelRCReturn(rc, rc);
447
448 RTGCPHYS GCPhys;
449 SSMR3GetGCPhys(pSSM, &GCPhys);
450 RTGCPHYS cb;
451 rc = SSMR3GetGCPhys(pSSM, &cb);
452 if (RT_FAILURE(rc))
453 return rc;
454 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
455 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
456
457 /*
458 * Locate a matching ROM range.
459 */
460 AssertLogRelMsgReturn( uInstance == 0
461 && iRegion == 0
462 && szDevName[0] == '\0',
463 ("GCPhys=%RGp %s\n", GCPhys, szDesc),
464 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
465 PPGMROMRANGE pRom;
466 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
467 {
468 if ( pRom->idSavedState == UINT8_MAX
469 && !strcmp(pRom->pszDesc, szDesc))
470 {
471 pRom->idSavedState = id;
472 break;
473 }
474 }
475 AssertLogRelMsgReturn(pRom, ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_LOAD_CONFIG_MISMATCH);
476 } /* forever */
477}
478
479
480/**
481 * Takes care of the virgin ROM pages in the first pass.
482 *
483 * This is an attempt at simplifying the handling of ROM pages a little bit.
484 * This ASSUMES that no new ROM ranges will be added and that they won't be
485 * relinked in any way.
486 *
 * @returns VBox status code.
 *
487 * @param pVM The VM handle.
488 * @param pSSM The SSM handle.
489 * @param fLiveSave Whether we're in a live save or not.
490 */
491static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
492{
493 pgmLock(pVM);
494 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
495 {
496 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
497 for (uint32_t iPage = 0; iPage < cPages; iPage++)
498 {
499 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
500 PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;
501
502 /* Get the virgin page descriptor. */
503 PPGMPAGE pPage;
504 if (PGMROMPROT_IS_ROM(enmProt))
505 pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
506 else
507 pPage = &pRom->aPages[iPage].Virgin;
508
509 /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
510 int rc = VINF_SUCCESS;
511 char abPage[PAGE_SIZE];
512 if (!PGM_PAGE_IS_ZERO(pPage))
513 {
514 void const *pvPage;
515 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
516 if (RT_SUCCESS(rc))
517 memcpy(abPage, pvPage, PAGE_SIZE);
518 }
519 else
520 ASMMemZeroPage(abPage);
521 pgmUnlock(pVM);
522 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
523
524 /* Save it. */
525 if (iPage > 0)
526 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
527 else
528 {
529 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
530 SSMR3PutU8(pSSM, pRom->idSavedState);
531 SSMR3PutU32(pSSM, iPage);
532 }
533 SSMR3PutU8(pSSM, (uint8_t)enmProt);
534 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
535 if (RT_FAILURE(rc))
536 return rc;
537
538 /* Update state. */
539 pgmLock(pVM);
540 pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
541 if (fLiveSave)
542 {
543 pVM->pgm.s.LiveSave.cDirtyPages--;
544 pVM->pgm.s.LiveSave.cReadyPages++;
545 }
546 }
547 }
548 pgmUnlock(pVM);
549 return VINF_SUCCESS;
550}
551
552
553/**
554 * Saves dirty pages in the shadowed ROM ranges.
555 *
556 * Used by pgmR3LiveExec and pgmR3SaveExec.
557 *
558 * @returns VBox status code.
559 * @param pVM The VM handle.
560 * @param pSSM The SSM handle.
561 * @param fLiveSave Whether it's a live save or not.
562 * @param fFinalPass Whether this is the final pass or not.
563 */
564static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
565{
566 /*
567 * The Shadowed ROMs.
568 *
569 * ASSUMES that the ROM ranges are fixed.
570 * ASSUMES that all the ROM ranges are mapped.
571 */
572 pgmLock(pVM);
573 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
574 {
575 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
576 {
577 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
578 uint32_t iPrevPage = cPages;
579 for (uint32_t iPage = 0; iPage < cPages; iPage++)
580 {
581 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
582 if ( !fLiveSave
583 || ( pRomPage->LiveSave.fDirty
584 && ( ( !pRomPage->LiveSave.fDirtiedRecently
585 && !pRomPage->LiveSave.fWrittenTo)
586 || fFinalPass
587 )
588 )
589 )
590 {
591 uint8_t abPage[PAGE_SIZE];
592 PGMROMPROT enmProt = pRomPage->enmProt;
593 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
594 PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(&pVM->pgm.s, GCPhys);
595 bool fZero = PGM_PAGE_IS_ZERO(pPage);
596 int rc = VINF_SUCCESS;
597 if (!fZero)
598 {
599 void const *pvPage;
600 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
601 if (RT_SUCCESS(rc))
602 memcpy(abPage, pvPage, PAGE_SIZE);
603 }
604 if (fLiveSave && RT_SUCCESS(rc))
605 {
606 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
607 pRomPage->LiveSave.fDirty = false;
608 pVM->pgm.s.LiveSave.cReadyPages++;
609 pVM->pgm.s.LiveSave.cDirtyPages--;
610 }
611 pgmUnlock(pVM);
612 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
613
614 if (iPage - 1U == iPrevPage && iPage > 0)
615 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
616 else
617 {
618 SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
619 SSMR3PutU8(pSSM, pRom->idSavedState);
620 SSMR3PutU32(pSSM, iPage);
621 }
622 rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
623 if (!fZero)
624 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
625 if (RT_FAILURE(rc))
626 return rc;
627
628 pgmLock(pVM);
629 iPrevPage = iPage;
630 }
631 /*
632 * In the final pass, make sure the protection is in sync.
633 */
634 else if ( fFinalPass
635 && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
636 {
637 PGMROMPROT enmProt = pRomPage->enmProt;
638 pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
639 pgmUnlock(pVM);
640
641 if (iPage - 1U == iPrevPage && iPage > 0)
642 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
643 else
644 {
645 SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
646 SSMR3PutU8(pSSM, pRom->idSavedState);
647 SSMR3PutU32(pSSM, iPage);
648 }
649 int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
650 if (RT_FAILURE(rc))
651 return rc;
652
653 pgmLock(pVM);
654 iPrevPage = iPage;
655 }
656 }
657 }
658 }
659 pgmUnlock(pVM);
660 return VINF_SUCCESS;
661}
662
663
664/**
665 * Assigns IDs to the MMIO2 ranges and saves them.
666 *
667 * @returns VBox status code.
668 * @param pVM The VM handle.
669 * @param pSSM Saved state handle.
670 */
671static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
672{
673 pgmLock(pVM);
674 uint8_t id = 1;
675 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++)
676 {
677 pMmio2->idSavedState = id;
678 SSMR3PutU8(pSSM, id);
679 SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pDevReg->szDeviceName);
680 SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance);
681 SSMR3PutU8(pSSM, pMmio2->iRegion);
682 SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc);
683 int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb);
684 if (RT_FAILURE(rc))
685 break;
686 }
687 pgmUnlock(pVM);
688 return SSMR3PutU8(pSSM, UINT8_MAX);
689}
690
691
692/**
693 * Loads the MMIO2 range ID assignments.
694 *
695 * @returns VBox status code.
696 *
697 * @param pVM The VM handle.
698 * @param pSSM The saved state handle.
699 */
700static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
701{
702 Assert(PGMIsLockOwner(pVM));
703
704 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
705 pMmio2->idSavedState = UINT8_MAX;
706
707 for (;;)
708 {
709 /*
710 * Read the data.
711 */
712 uint8_t id;
713 int rc = SSMR3GetU8(pSSM, &id);
714 if (RT_FAILURE(rc))
715 return rc;
716 if (id == UINT8_MAX)
717 {
718 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
719 AssertLogRelMsg(pMmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));
720 return VINF_SUCCESS; /* the end */
721 }
722 AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
723
724 char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szDeviceName)];
725 rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
726 AssertLogRelRCReturn(rc, rc);
727
728 uint32_t uInstance;
729 SSMR3GetU32(pSSM, &uInstance);
730 uint8_t iRegion;
731 SSMR3GetU8(pSSM, &iRegion);
732
733 char szDesc[64];
734 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
735 AssertLogRelRCReturn(rc, rc);
736
737 RTGCPHYS cb;
738 rc = SSMR3GetGCPhys(pSSM, &cb);
739 AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
740
741 /*
742 * Locate a matching MMIO2 range.
743 */
744 PPGMMMIO2RANGE pMmio2;
745 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
746 {
747 if ( pMmio2->idSavedState == UINT8_MAX
748 && pMmio2->iRegion == iRegion
749 && pMmio2->pDevInsR3->iInstance == uInstance
750 && !strcmp(pMmio2->pDevInsR3->pDevReg->szDeviceName, szDevName))
751 {
752 pMmio2->idSavedState = id;
753 break;
754 }
755 }
756 AssertLogRelMsgReturn(pMmio2, ("%s/%u/%u: %s\n", szDevName, uInstance, iRegion, szDesc), VERR_SSM_LOAD_CONFIG_MISMATCH);
757 } /* forever */
758}
759
760
761
762/**
763 * Save quiescent MMIO2 pages.
764 *
765 * @returns VBox status code.
766 * @param pVM The VM handle.
767 * @param pSSM The SSM handle.
768 * @param fLiveSave Whether it's a live save or not.
769 * @param fFinalPass Whether this is the final pass or not.
770 */
771static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
772{
773 int rc = VINF_SUCCESS;
774 /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
775 * device that we wish to know about changes.) */
776 if (fFinalPass)
777 {
778 pgmLock(pVM);
779 for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
780 pMmio2 && RT_SUCCESS(rc);
781 pMmio2 = pMmio2->pNextR3)
782 {
783 uint8_t const *pbPage = (uint8_t const *)pMmio2->RamRange.pvR3;
784 uint32_t cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
785 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
786 {
787 uint8_t u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
788 if (iPage != 0)
789 rc = SSMR3PutU8(pSSM, u8Type);
790 else
791 {
792 SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
793 SSMR3PutU8(pSSM, pMmio2->idSavedState);
794 rc = SSMR3PutU32(pSSM, 0);
795 }
796 if (u8Type == PGM_STATE_REC_MMIO2_RAW)
797 rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
798 if (RT_FAILURE(rc))
799 break;
800 }
801 }
802 pgmUnlock(pVM);
803 }
804
805 return rc;
806}
807
808
809/**
810 * Save quiescent RAM pages.
811 *
812 * @returns VBox status code.
813 * @param pVM The VM handle.
814 * @param pSSM The SSM handle.
815 * @param fLiveSave Whether it's a live save or not.
816 * @param uPass The pass number (SSM_PASS_FINAL for the final pass).
817 */
818static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
819{
820 /*
821 * The RAM.
822 */
823 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
824 RTGCPHYS GCPhysCur = 0;
825 PPGMRAMRANGE pCur;
826 pgmLock(pVM);
827 do
828 {
829 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
830 uint32_t cSinceYield = 0;
831 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
832 {
833 if ( pCur->GCPhysLast > GCPhysCur
834 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
835 {
836 PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
837 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
838 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
839 GCPhysCur = 0;
840 for (; iPage < cPages; iPage++, cSinceYield++)
841 {
842 /* Do yield first. */
843 if ( uPass != SSM_PASS_FINAL
844 && (cSinceYield & 0x7ff) == 0x7ff
845 && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
846 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
847 {
848 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
849 break; /* restart */
850 }
851
852 /*
853 * Only save pages that are dirty and haven't changed since the last scan.
854 */
855 if ( uPass != SSM_PASS_FINAL
856 && paLSPages)
857 {
858 if (!paLSPages[iPage].fDirty)
859 continue;
860 if (paLSPages[iPage].fWriteMonitoredJustNow)
861 continue;
862 if (paLSPages[iPage].fIgnore)
863 continue;
864 if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM) /* in case of recent remappings */
865 continue;
866 if ( PGM_PAGE_GET_STATE(&pCur->aPages[iPage])
867 != ( paLSPages[iPage].fZero
868 ? PGM_PAGE_STATE_ZERO
869 : paLSPages[iPage].fShared
870 ? PGM_PAGE_STATE_SHARED
871 : PGM_PAGE_STATE_WRITE_MONITORED))
872 continue;
873 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
874 continue;
875 }
876 else
877 {
878 if ( paLSPages
879 && !paLSPages[iPage].fDirty
880 && !paLSPages[iPage].fIgnore)
881 continue;
882 if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
883 continue;
884 }
885
886 /*
887 * Do the saving outside the PGM critsect since SSM may block on I/O.
888 */
889 int rc;
890 RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
891
892 if (!PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]))
893 {
894 /*
895 * Copy the page and then save it outside the lock (since any
896 * SSM call may block).
897 */
898 char abPage[PAGE_SIZE];
899 void const *pvPage;
900 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
901 if (RT_SUCCESS(rc))
902 memcpy(abPage, pvPage, PAGE_SIZE);
903 pgmUnlock(pVM);
904 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);
905
906 if (GCPhys == GCPhysLast + PAGE_SIZE)
907 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
908 else
909 {
910 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
911 SSMR3PutGCPhys(pSSM, GCPhys);
912 }
913 rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
914 }
915 else
916 {
917 /*
918 * Dirty zero page.
919 */
920 pgmUnlock(pVM);
921
922 if (GCPhys == GCPhysLast + PAGE_SIZE)
923 rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
924 else
925 {
926 SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
927 rc = SSMR3PutGCPhys(pSSM, GCPhys);
928 }
929 }
930 if (RT_FAILURE(rc))
931 return rc;
932
933 pgmLock(pVM);
934 GCPhysLast = GCPhys;
935 if (paLSPages)
936 {
937 paLSPages[iPage].fDirty = 0;
938 paLSPages[iPage].uPassSaved = uPass;
939 pVM->pgm.s.LiveSave.cReadyPages++;
940 pVM->pgm.s.LiveSave.cDirtyPages--;
941 }
942 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
943 {
944 GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
945 break; /* restart */
946 }
947
948 } /* for each page in range */
949
950 if (GCPhysCur != 0)
951 break; /* Yield + ramrange change */
952 GCPhysCur = pCur->GCPhysLast;
953 }
954 } /* for each range */
955 } while (pCur);
956 pgmUnlock(pVM);
957
958 return VINF_SUCCESS;
959}
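
/* Record stream illustration for pgmR3SaveRamPages (an informal restatement
 * of the GCPhysLast bookkeeping above, not a normative format description):
 * a page immediately following the previously saved one omits the address,
 * so saving pages at 0x1000, 0x2000 and 0x5000 would produce roughly:
 *
 *      RAM_RAW  | ADDR, GCPhys=0x1000, 4096 bytes  - explicit address
 *      RAM_RAW,                        4096 bytes  - 0x2000 implied (prev + PAGE_SIZE)
 *      RAM_ZERO | ADDR, GCPhys=0x5000              - gap, explicit address again
 */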
960
961
962/**
963 * Scan for page modifications and reprotect them.
964 *
965 * Note! Since we don't care about MMIO or MMIO2 pages and since we don't
966 * have any movable ROMs yet, we can safely yield the PGM when we
967 * detect contention.
968 *
969 * This holds true for part 2 as well.
970 *
971 * @param pVM The VM handle.
972 * @param fFinalPass Whether this is the final pass or not.
973 */
974static void pgmR3LiveExecScanPages(PVM pVM, bool fFinalPass)
975{
976 /*
977 * The shadow ROMs.
978 */
979 pgmLock(pVM);
980 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
981 {
982 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
983 {
984 uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
985 for (uint32_t iPage = 0; iPage < cPages; iPage++)
986 {
987 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
988 if (pRomPage->LiveSave.fWrittenTo)
989 {
990 pRomPage->LiveSave.fWrittenTo = false;
991 if (!pRomPage->LiveSave.fDirty)
992 {
993 pRomPage->LiveSave.fDirty = true;
994 pVM->pgm.s.LiveSave.cReadyPages--;
995 pVM->pgm.s.LiveSave.cDirtyPages++;
996 }
997 pRomPage->LiveSave.fDirtiedRecently = true;
998 }
999 else
1000 pRomPage->LiveSave.fDirtiedRecently = false;
1001 }
1002 }
1003 }
1004 pgmUnlock(pVM);
1005
1006 /*
1007 * The MMIO2 ranges.
1008 */
1009 /* later */
1010
1011 /*
1012 * The RAM.
1013 */
1014 RTGCPHYS GCPhysCur = 0;
1015 PPGMRAMRANGE pCur;
1016 pgmLock(pVM);
1017 do
1018 {
1019 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1020 uint32_t cSinceYield = 0;
1021 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
1022 {
1023 if ( pCur->GCPhysLast > GCPhysCur
1024 && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
1025 {
1026 PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
1027 uint32_t cPages = pCur->cb >> PAGE_SHIFT;
1028 uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
1029 GCPhysCur = 0;
1030 for (; iPage < cPages; iPage++, cSinceYield++)
1031 {
1032 /* Do yield first. */
1033 if ( !fFinalPass
1034 && (cSinceYield & 0x7ff) == 0x7ff
1035 && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
1036 && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
1037 {
1038 GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1039 break; /* restart */
1040 }
1041
1042 /* Skip already ignored pages. */
1043 if (paLSPages[iPage].fIgnore)
1044 continue;
1045
1046 if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
1047 {
1048 /*
1049 * A RAM page.
1050 */
1051 switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
1052 {
1053 case PGM_PAGE_STATE_ALLOCATED:
1054 /** @todo Optimize this: Don't always re-enable write
1055 * monitoring if the page is known to be very busy. */
1056 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1057 {
1058 Assert(paLSPages[iPage].fWriteMonitored);
1059 PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
1060 Assert(pVM->pgm.s.cWrittenToPages > 0);
1061 pVM->pgm.s.cWrittenToPages--;
1062 }
1063 else
1064 {
1065 Assert(!paLSPages[iPage].fWriteMonitored);
1066 pVM->pgm.s.LiveSave.cMonitoredPages++;
1067 }
1068
1069 if (!paLSPages[iPage].fDirty)
1070 {
1071 pVM->pgm.s.LiveSave.cReadyPages--;
1072 pVM->pgm.s.LiveSave.cDirtyPages++;
1073 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1074 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1075 }
1076
1077 PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
1078 pVM->pgm.s.cMonitoredPages++;
1079 paLSPages[iPage].fWriteMonitored = 1;
1080 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1081 paLSPages[iPage].fDirty = 1;
1082 paLSPages[iPage].fZero = 0;
1083 paLSPages[iPage].fShared = 0;
1084 break;
1085
1086 case PGM_PAGE_STATE_WRITE_MONITORED:
1087 Assert(paLSPages[iPage].fWriteMonitored);
1088 if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
1089 paLSPages[iPage].fWriteMonitoredJustNow = 0;
1090 else
1091 {
1092 paLSPages[iPage].fWriteMonitoredJustNow = 1;
1093 if (!paLSPages[iPage].fDirty)
1094 {
1095 pVM->pgm.s.LiveSave.cReadyPages--;
1096 pVM->pgm.s.LiveSave.cDirtyPages++;
1097 if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
1098 paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
1099 }
1100 }
1101 break;
1102
1103 case PGM_PAGE_STATE_ZERO:
1104 if (!paLSPages[iPage].fZero)
1105 {
1106 paLSPages[iPage].fZero = 1;
1107 paLSPages[iPage].fShared = 0;
1108 if (!paLSPages[iPage].fDirty)
1109 {
1110 paLSPages[iPage].fDirty = 1;
1111 pVM->pgm.s.LiveSave.cReadyPages--;
1112 pVM->pgm.s.LiveSave.cDirtyPages++;
1113 }
1114 }
1115 break;
1116
1117 case PGM_PAGE_STATE_SHARED:
1118 if (!paLSPages[iPage].fShared)
1119 {
1120 paLSPages[iPage].fZero = 0;
1121 paLSPages[iPage].fShared = 1;
1122 if (!paLSPages[iPage].fDirty)
1123 {
1124 paLSPages[iPage].fDirty = 1;
1125 pVM->pgm.s.LiveSave.cReadyPages--;
1126 pVM->pgm.s.LiveSave.cDirtyPages++;
1127 }
1128 }
1129 break;
1130 }
1131 }
1132 else
1133 {
1134 /*
1135 * All other types => Ignore the page.
1136 */
1137 Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
1138 paLSPages[iPage].fIgnore = 1;
1139 if (paLSPages[iPage].fWriteMonitored)
1140 {
1141 /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
1142 * pages! */
1143 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
1144 {
1145 AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
1146 PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
1147 Assert(pVM->pgm.s.cMonitoredPages > 0);
1148 pVM->pgm.s.cMonitoredPages--;
1149 }
1150 if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
1151 {
1152 PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
1153 Assert(pVM->pgm.s.cWrittenToPages > 0);
1154 pVM->pgm.s.cWrittenToPages--;
1155 }
1156 pVM->pgm.s.LiveSave.cMonitoredPages--;
1157 }
1158
1159 /** @todo the counting doesn't quite work out here. fix later? */
1160 if (paLSPages[iPage].fDirty)
1161 pVM->pgm.s.LiveSave.cDirtyPages--;
1162 else
1163 pVM->pgm.s.LiveSave.cReadyPages--;
1164 pVM->pgm.s.LiveSave.cIgnoredPages++;
1165 }
1166 } /* for each page in range */
1167
1168 if (GCPhysCur != 0)
1169 break; /* Yield + ramrange change */
1170 GCPhysCur = pCur->GCPhysLast;
1171 }
1172 } /* for each range */
1173 } while (pCur);
1174 pgmUnlock(pVM);
1175}
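
/* Informal summary of the per-page transitions performed by the RAM scan in
 * pgmR3LiveExecScanPages above (restating the switch, not adding behavior):
 *
 *      ALLOCATED       -> WRITE_MONITORED; page marked dirty, cDirtied bumped.
 *      WRITE_MONITORED -> unchanged; fWriteMonitoredJustNow is cleared unless
 *                         the page holds write locks, in which case it is set
 *                         again and the page stays/becomes dirty.
 *      ZERO / SHARED   -> marked dirty the first time the new state is seen.
 *      other types     -> ignored from here on (fIgnore set, counters fixed up).
 */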
1176
1177
1178/**
1179 * Execute a live save pass.
1180 *
1181 * @returns VBox status code.
1182 *
1183 * @param pVM The VM handle.
1184 * @param pSSM The SSM handle.
 * @param uPass The pass number.
1185 */
1186static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1187{
1188 int rc;
1189
1190 /*
1191 * Save the MMIO2 and ROM range IDs in pass 0.
1192 */
1193 if (uPass == 0)
1194 {
1195 rc = pgmR3SaveRomRanges(pVM, pSSM);
1196 if (RT_FAILURE(rc))
1197 return rc;
1198 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1199 if (RT_FAILURE(rc))
1200 return rc;
1201 }
1202
1203 /*
1204 * Do the scanning.
1205 */
1206 pgmR3LiveExecScanPages(pVM, false /*fFinalPass*/);
1207 pgmR3PoolClearAll(pVM); /** @todo this could perhaps be optimized a bit. */
1208
1209 /*
1210 * Save the pages.
1211 */
1212 if (uPass == 0)
1213 rc = pgmR3SaveRomVirginPages( pVM, pSSM, true /*fLiveSave*/);
1214 else
1215 rc = VINF_SUCCESS;
1216 if (RT_SUCCESS(rc))
1217 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1218 if (RT_SUCCESS(rc))
1219 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
1220 if (RT_SUCCESS(rc))
1221 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, uPass);
1222 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1223
1224 return rc;
1225}
1226
1227//#include <iprt/stream.h>
1228
1229/**
1230 * Votes on whether the live save phase is done or not.
1231 *
1232 * @returns VBox status code.
1233 *
1234 * @param pVM The VM handle.
1235 * @param pSSM The SSM handle.
1236 */
1237static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM)
1238{
1239// RTPrintf("# Ready=%08x Dirty=%#08x Ignored=%#08x Monitored=%#08x MMIO2=%#08x\n",
1240// pVM->pgm.s.LiveSave.cReadyPages,
1241// pVM->pgm.s.LiveSave.cDirtyPages,
1242// pVM->pgm.s.LiveSave.cIgnoredPages,
1243// pVM->pgm.s.LiveSave.cMonitoredPages,
1244// pVM->pgm.s.LiveSave.cMmio2Pages
1245// );
1246// static int s_iHack = 0;
1247// if ((++s_iHack % 25) == 0)
1248// return VINF_SUCCESS;
1249
1250 if (pVM->pgm.s.LiveSave.cDirtyPages < 256) /* semi-arbitrary threshold: 256 x 4 KiB pages = at most ~1 MiB left for the final pass. */
1251 return VINF_SUCCESS;
1252 return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
1253}
1254
1255#ifndef VBOX_WITH_LIVE_MIGRATION
1256
1257/**
1258 * Save zero indicator + bits for the specified page.
1259 *
1260 * @returns VBox status code, errors are logged/asserted before returning.
1261 * @param pVM The VM handle.
1262 * @param pSSM The saved state handle.
1263 * @param pPage The page to save.
1264 * @param GCPhys The address of the page.
1265 * @param pRam The ram range (for error logging).
1266 */
1267static int pgmR3SavePage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1268{
1269 int rc;
1270 if (PGM_PAGE_IS_ZERO(pPage))
1271 rc = SSMR3PutU8(pSSM, 0);
1272 else
1273 {
1274 void const *pvPage;
1275 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
1276 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
1277
1278 SSMR3PutU8(pSSM, 1);
1279 rc = SSMR3PutMem(pSSM, pvPage, PAGE_SIZE);
1280 }
1281 return rc;
1282}
1283
1284
1285/**
1286 * Save a shadowed ROM page.
1287 *
1288 * Format: Type, protection, and two pages with zero indicators.
1289 *
1290 * @returns VBox status code, errors are logged/asserted before returning.
1291 * @param pVM The VM handle.
1292 * @param pSSM The saved state handle.
1293 * @param pPage The page to save.
1294 * @param GCPhys The address of the page.
1295 * @param pRam The ram range (for error logging).
1296 */
1297static int pgmR3SaveShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1298{
1299 /* Need to save both pages and the current state. */
1300 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
1301 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);
1302
1303 SSMR3PutU8(pSSM, PGMPAGETYPE_ROM_SHADOW);
1304 SSMR3PutU8(pSSM, pRomPage->enmProt);
1305
1306 int rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhys, pRam);
1307 if (RT_SUCCESS(rc))
1308 {
1309 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1310 rc = pgmR3SavePage(pVM, pSSM, pPagePassive, GCPhys, pRam);
1311 }
1312 return rc;
1313}
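
/* Byte layout sketch of the old (non live migration) shadowed ROM record
 * produced by the two functions above; an illustration derived from the code,
 * not a normative format description:
 *
 *      u8  PGMPAGETYPE_ROM_SHADOW              - page type
 *      u8  enmProt                             - current protection
 *      u8  0|1 [, 4096 bytes when 1]           - active page (zero indicator + bits)
 *      u8  0|1 [, 4096 bytes when 1]           - passive page (zero indicator + bits)
 */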
1314
1315#endif /* !VBOX_WITH_LIVE_MIGRATION */
1316
1317/**
1318 * Execute state save operation.
1319 *
1320 * @returns VBox status code.
1321 * @param pVM VM Handle.
1322 * @param pSSM SSM operation handle.
1323 */
1324static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
1325{
1326 int rc;
1327 unsigned i;
1328 PPGM pPGM = &pVM->pgm.s;
1329
1330 /*
1331 * Lock PGM and set the no-more-writes indicator.
1332 */
1333 pgmLock(pVM);
1334 pVM->pgm.s.fNoMorePhysWrites = true;
1335
1336 /*
1337 * Save basic data (required / unaffected by relocation).
1338 */
1339 SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
1340
1341 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1342 {
1343 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1344 SSMR3PutStruct(pSSM, &pVCpu->pgm.s, &s_aPGMCpuFields[0]);
1345 }
1346
1347 /*
1348 * The guest mappings.
1349 */
1350 i = 0;
1351 for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
1352 {
1353 SSMR3PutU32( pSSM, i);
1354 SSMR3PutStrZ( pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
1355 SSMR3PutGCPtr( pSSM, pMapping->GCPtr);
1356 SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
1357 }
1358 rc = SSMR3PutU32(pSSM, ~0); /* terminator. */
1359
1360#ifdef VBOX_WITH_LIVE_MIGRATION
1361 /*
1362 * Save the (remainder of the) memory.
1363 */
1364 if (RT_SUCCESS(rc))
1365 {
1366 if (pVM->pgm.s.LiveSave.fActive)
1367 {
1368 pgmR3LiveExecScanPages(pVM, true /*fFinalPass*/);
1369 rc = pgmR3SaveShadowedRomPages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
1370 if (RT_SUCCESS(rc))
1371 rc = pgmR3SaveMmio2Pages( pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
1372 if (RT_SUCCESS(rc))
1373 rc = pgmR3SaveRamPages( pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
1374 }
1375 else
1376 {
1377 rc = pgmR3SaveRomRanges(pVM, pSSM);
1378 if (RT_SUCCESS(rc))
1379 rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
1380 if (RT_SUCCESS(rc))
1381 rc = pgmR3SaveRomVirginPages( pVM, pSSM, false /*fLiveSave*/);
1382 if (RT_SUCCESS(rc))
1383 rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
1384 if (RT_SUCCESS(rc))
1385 rc = pgmR3SaveMmio2Pages( pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
1386 if (RT_SUCCESS(rc))
1387 rc = pgmR3SaveRamPages( pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
1388 }
1389 SSMR3PutU8(pSSM, PGM_STATE_REC_END); /* (Ignore the rc, SSM takes care of it.) */
1390 }
1391
1392#else /* !VBOX_WITH_LIVE_MIGRATION */
1393 /*
1394 * Ram ranges and the memory they describe.
1395 */
1396 i = 0;
1397 for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; pRam; pRam = pRam->pNextR3, i++)
1398 {
1399 /*
1400 * Save the ram range details.
1401 */
1402 SSMR3PutU32(pSSM, i);
1403 SSMR3PutGCPhys(pSSM, pRam->GCPhys);
1404 SSMR3PutGCPhys(pSSM, pRam->GCPhysLast);
1405 SSMR3PutGCPhys(pSSM, pRam->cb);
1406 SSMR3PutU8(pSSM, !!pRam->pvR3); /* Boolean indicating memory or not. */
1407 SSMR3PutStrZ(pSSM, pRam->pszDesc); /* This is the best unique id we have... */
1408
1409 /*
1410 * Iterate the pages, only two special cases.
1411 */
1412 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
1413 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1414 {
1415 RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
1416 PPGMPAGE pPage = &pRam->aPages[iPage];
1417 uint8_t uType = PGM_PAGE_GET_TYPE(pPage);
1418
1419 if (uType == PGMPAGETYPE_ROM_SHADOW) /** @todo This isn't right, but it doesn't currently matter. */
1420 rc = pgmR3SaveShadowedRomPage(pVM, pSSM, pPage, GCPhysPage, pRam);
1421 else if (uType == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1422 {
1423 /* MMIO2 alias -> MMIO; the device will just have to deal with this. */
1424 SSMR3PutU8(pSSM, PGMPAGETYPE_MMIO);
1425 rc = SSMR3PutU8(pSSM, 0 /* ZERO */);
1426 }
1427 else
1428 {
1429 SSMR3PutU8(pSSM, uType);
1430 rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhysPage, pRam);
1431 }
1432 if (RT_FAILURE(rc))
1433 break;
1434 }
1435 if (RT_FAILURE(rc))
1436 break;
1437 }
1438
1439 rc = SSMR3PutU32(pSSM, ~0); /* terminator. */
1440#endif /* !VBOX_WITH_LIVE_MIGRATION */
1441
1442 pgmUnlock(pVM);
1443 return rc;
1444}
1445
1446
1447/**
1448 * Cleans up after a save state operation.
1449 *
1450 * @returns VBox status code.
1451 * @param pVM VM Handle.
1452 * @param pSSM SSM operation handle.
1453 */
1454static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
1455{
1456 /*
1457 * Free the tracking arrays and disable write monitoring.
1458 *
1459 * Play nice with the PGM lock in case we're called while the VM is still
1460 * running. This means we have to delay the freeing since we wish to use
1461 * paLSPages as an indicator of which RAM ranges we need to scan for
1462 * write monitored pages.
1463 */
1464 void *pvToFree = NULL;
1465 PPGMRAMRANGE pCur;
1466 uint32_t cMonitoredPages = 0;
1467 pgmLock(pVM);
1468 do
1469 {
1470 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
1471 {
1472 if (pCur->paLSPages)
1473 {
1474 if (pvToFree)
1475 {
1476 uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
1477 pgmUnlock(pVM);
1478 MMR3HeapFree(pvToFree);
1479 pvToFree = NULL;
1480 pgmLock(pVM);
1481 if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
1482 break; /* start over again. */
1483 }
1484
1485 pvToFree = pCur->paLSPages;
1486 pCur->paLSPages = NULL;
1487
1488 uint32_t iPage = pCur->cb >> PAGE_SHIFT;
1489 while (iPage--)
1490 {
1491 PPGMPAGE pPage = &pCur->aPages[iPage];
1492 PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
1493 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1494 {
1495 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1496 cMonitoredPages++;
1497 }
1498 }
1499 }
1500 }
1501 } while (pCur);
1502
1503 pVM->pgm.s.LiveSave.fActive = false;
1504
1505 Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
1506 if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
1507 pVM->pgm.s.cMonitoredPages = 0;
1508 else
1509 pVM->pgm.s.cMonitoredPages -= cMonitoredPages;
1510
1511 /** @todo this is blindly assuming that we're the only user of write
1512 * monitoring. Fix this when more users are added. */
1513 pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
1514 pgmUnlock(pVM);
1515
1516 MMR3HeapFree(pvToFree);
1517 pvToFree = NULL;
1518
1519 return VINF_SUCCESS;
1520}
1521
1522
1523/**
1524 * Prepare state load operation.
1525 *
1526 * @returns VBox status code.
1527 * @param pVM VM Handle.
1528 * @param pSSM SSM operation handle.
1529 */
1530static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
1531{
1532 /*
1533 * Call the reset function to make sure all the memory is cleared.
1534 */
1535 PGMR3Reset(pVM);
1536 pVM->pgm.s.LiveSave.fActive = false;
1537 NOREF(pSSM);
1538 return VINF_SUCCESS;
1539}
1540
1541
1542/**
1543 * Load an ignored page.
1544 *
1545 * @returns VBox status code.
1546 * @param pSSM The saved state handle.
1547 */
1548static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
1549{
1550 uint8_t abPage[PAGE_SIZE];
1551 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
1552}
1553
1554
1555/**
1556 * Loads a page without any bits in the saved state, i.e. making sure it's
1557 * really zero.
1558 *
1559 * @returns VBox status code.
1560 * @param pVM The VM handle.
1561 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
1562 * state).
1563 * @param pPage The guest page tracking structure.
1564 * @param GCPhys The page address.
1565 * @param pRam The ram range (logging).
1566 */
1567static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1568{
1569 if ( PGM_PAGE_GET_TYPE(pPage) != uType
1570 && uType != PGMPAGETYPE_INVALID)
1571 return VERR_SSM_UNEXPECTED_DATA;
1572
1573 /* I think this should be sufficient. */
1574 if (!PGM_PAGE_IS_ZERO(pPage))
1575 return VERR_SSM_UNEXPECTED_DATA;
1576
1577 NOREF(pVM);
1578 NOREF(GCPhys);
1579 NOREF(pRam);
1580 return VINF_SUCCESS;
1581}
1582
1583
1584/**
1585 * Loads a page from the saved state.
1586 *
1587 * @returns VBox status code.
1588 * @param pVM The VM handle.
1589 * @param pSSM The SSM handle.
1590 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
1591 * state).
1592 * @param pPage The guest page tracking structure.
1593 * @param GCPhys The page address.
1594 * @param pRam The ram range (logging).
1595 */
1596static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1597{
1598 int rc;
1599
1600 /*
1601 * Match up the type, dealing with MMIO2 aliases (dropped).
1602 */
1603 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == uType
1604 || uType == PGMPAGETYPE_INVALID,
1605 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
1606 VERR_SSM_UNEXPECTED_DATA);
1607
1608 /*
1609 * Load the page.
1610 */
1611 void *pvPage;
1612 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
1613 if (RT_SUCCESS(rc))
1614 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
1615
1616 return rc;
1617}
1618
1619
1620/**
1621 * Loads a page (counter part to pgmR3SavePage).
1622 *
1623 * @returns VBox status code, fully bitched errors.
1624 * @param pVM The VM handle.
1625 * @param pSSM The SSM handle.
1626 * @param uType The page type.
1627 * @param pPage The page.
1628 * @param GCPhys The page address.
1629 * @param pRam The RAM range (for error messages).
1630 */
1631static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1632{
1633 uint8_t uState;
1634 int rc = SSMR3GetU8(pSSM, &uState);
1635 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
1636 if (uState == 0 /* zero */)
1637 rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
1638 else if (uState == 1)
1639 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
1640 else
1641 rc = VERR_INTERNAL_ERROR;
1642 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
1643 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
1644 rc);
1645 return VINF_SUCCESS;
1646}
1647
1648
1649/**
1650 * Loads a shadowed ROM page.
1651 *
1652 * @returns VBox status code, errors are fully bitched.
1653 * @param pVM The VM handle.
1654 * @param pSSM The saved state handle.
1655 * @param pPage The page.
1656 * @param GCPhys The page address.
1657 * @param pRam The RAM range (for error messages).
1658 */
1659static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1660{
1661 /*
1662 * Load and set the protection first, then load the two pages, the first
1663 * one being the active page and the other the passive one.
1664 */
1665 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
1666 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);
1667
1668 uint8_t uProt;
1669 int rc = SSMR3GetU8(pSSM, &uProt);
1670 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
1671 PGMROMPROT enmProt = (PGMROMPROT)uProt;
1672 AssertLogRelMsgReturn( enmProt >= PGMROMPROT_INVALID
1673 && enmProt < PGMROMPROT_END,
1674 ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
1675 VERR_SSM_UNEXPECTED_DATA);
1676
1677 if (pRomPage->enmProt != enmProt)
1678 {
1679 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
1680 AssertLogRelRCReturn(rc, rc);
1681 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
1682 }
1683
1684 PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
1685 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
1686 uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
1687 uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;
1688
1689 /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
1690 * used down the line (the 2nd page will be written to the first
1691 * one because of a false TLB hit since the TLB is using GCPhys and
1692 * doesn't check the HCPhys of the desired page). */
1693 rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
1694 if (RT_SUCCESS(rc))
1695 {
1696 *pPageActive = *pPage;
1697 rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
1698 }
1699 return rc;
1700}
1701
1702/**
1703 * Ram range flags and bits for older versions of the saved state.
1704 *
1705 * @returns VBox status code.
1706 *
1707 * @param pVM The VM handle
1708 * @param pSSM The SSM handle.
1709 * @param uVersion The saved state version.
1710 */
1711static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
1712{
1713 PPGM pPGM = &pVM->pgm.s;
1714
1715 /*
1716 * Ram range flags and bits.
1717 */
1718 uint32_t i = 0;
1719 for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
1720 {
1721 /* Check the sequence number / separator. */
1722 uint32_t u32Sep;
1723 int rc = SSMR3GetU32(pSSM, &u32Sep);
1724 if (RT_FAILURE(rc))
1725 return rc;
1726 if (u32Sep == ~0U)
1727 break;
1728 if (u32Sep != i)
1729 {
1730 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1731 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1732 }
1733 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1734
1735 /* Get the range details. */
1736 RTGCPHYS GCPhys;
1737 SSMR3GetGCPhys(pSSM, &GCPhys);
1738 RTGCPHYS GCPhysLast;
1739 SSMR3GetGCPhys(pSSM, &GCPhysLast);
1740 RTGCPHYS cb;
1741 SSMR3GetGCPhys(pSSM, &cb);
1742 uint8_t fHaveBits;
1743 rc = SSMR3GetU8(pSSM, &fHaveBits);
1744 if (RT_FAILURE(rc))
1745 return rc;
1746 if (fHaveBits & ~1)
1747 {
1748 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1749 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1750 }
1751 size_t cchDesc = 0;
1752 char szDesc[256];
1753 szDesc[0] = '\0';
1754 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
1755 {
1756 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
1757 if (RT_FAILURE(rc))
1758 return rc;
1759 /* Since we've modified the description strings in r45878, only compare
1760 them if the saved state is more recent. */
1761 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
1762 cchDesc = strlen(szDesc);
1763 }
1764
1765 /*
1766 * Match it up with the current range.
1767 *
1768 * Note there is a hack for dealing with the high BIOS mapping
1769 * in the old saved state format, this means we might not have
1770 * a 1:1 match on success.
1771 */
1772 if ( ( GCPhys != pRam->GCPhys
1773 || GCPhysLast != pRam->GCPhysLast
1774 || cb != pRam->cb
1775 || ( cchDesc
1776 && strcmp(szDesc, pRam->pszDesc)) )
1777 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
1778 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
1779 || GCPhys != UINT32_C(0xfff80000)
1780 || GCPhysLast != UINT32_C(0xffffffff)
1781 || pRam->GCPhysLast != GCPhysLast
1782 || pRam->GCPhys < GCPhys
1783 || !fHaveBits)
1784 )
1785 {
1786 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
1787 "State : %RGp-%RGp %RGp bytes %s %s\n",
1788 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
1789 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
1790 /*
1791 * If we're loading a state for debugging purposes, don't make a fuss if
1792 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
1793 */
1794 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
1795 || GCPhys < 8 * _1M)
1796 AssertFailedReturn(VERR_SSM_LOAD_CONFIG_MISMATCH);
1797
1798 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
1799 continue;
1800 }
1801
1802 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
1803 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
1804 {
1805 /*
1806 * Load the pages one by one.
1807 */
1808 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1809 {
1810 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
1811 PPGMPAGE pPage = &pRam->aPages[iPage];
1812 uint8_t uType;
1813 rc = SSMR3GetU8(pSSM, &uType);
1814 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
1815 if (uType == PGMPAGETYPE_ROM_SHADOW)
1816 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
1817 else
1818 rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
1819 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
1820 }
1821 }
1822 else
1823 {
1824 /*
1825 * Old format.
1826 */
1827 AssertLogRelReturn(!pVM->pgm.s.fRamPreAlloc, VERR_NOT_SUPPORTED); /* can't be detected. */
1828
1829 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
1830 The rest is generally irrelevant and wrong since this stuff has to match the registrations. */
1831 uint32_t fFlags = 0;
1832 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1833 {
1834 uint16_t u16Flags;
1835 rc = SSMR3GetU16(pSSM, &u16Flags);
1836 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
1837 fFlags |= u16Flags;
1838 }
1839
1840 /* Load the bits */
1841 if ( !fHaveBits
1842 && GCPhysLast < UINT32_C(0xe0000000))
1843 {
1844 /*
1845 * Dynamic chunks.
1846 */
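 /* Old-format layout: the range is saved in 1 MB chunks, each preceded
    by a boolean 'present' byte.  A present chunk is followed by the raw
    bits of all its pages, while an absent chunk means those pages were
    all zero. */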
1847 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
1848 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
1849 ("cPages=%#x cPagesInChunk=%#x\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
1850 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1851
1852 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
1853 {
1854 uint8_t fPresent;
1855 rc = SSMR3GetU8(pSSM, &fPresent);
1856 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
1857 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
1858 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
1859 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1860
1861 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
1862 {
1863 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
1864 PPGMPAGE pPage = &pRam->aPages[iPage];
1865 if (fPresent)
1866 {
1867 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1868 rc = pgmR3LoadPageToDevNullOld(pSSM);
1869 else
1870 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
1871 }
1872 else
1873 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
1874 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
1875 }
1876 }
1877 }
1878 else if (pRam->pvR3)
1879 {
1880 /*
1881 * MMIO2.
1882 */
1883 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
1884 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
1885 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1886 AssertLogRelMsgReturn(pRam->pvR3,
1887 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
1888 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1889
1890 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
1891 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
1892 }
1893 else if (GCPhysLast < UINT32_C(0xfff80000))
1894 {
1895 /*
1896 * PCI MMIO, no pages saved.
1897 */
1898 }
1899 else
1900 {
1901 /*
1902 * Load the 0xfff80000..0xffffffff BIOS range.
1903 * It starts with a number of reserved pages that we have to skip over since
1904 * the RAMRANGE created by the new code won't include those.
1905 */
1906 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
1907 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
1908 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
1909 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1910 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
1911 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
1912 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1913
1914 /* Skip wasted reserved pages before the ROM. */
1915 while (GCPhys < pRam->GCPhys)
1916 {
1917 rc = pgmR3LoadPageToDevNullOld(pSSM);
1918 GCPhys += PAGE_SIZE;
1919 }
1920
1921 /* Load the BIOS pages. */
1922 cPages = pRam->cb >> PAGE_SHIFT;
1923 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1924 {
1925 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
1926 PPGMPAGE pPage = &pRam->aPages[iPage];
1927
1928 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
1929 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
1930 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1931 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
1932 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
1933 }
1934 }
1935 }
1936 }
1937
1938 return VINF_SUCCESS;
1939}
1940
1941
1942/**
1943 * Worker for pgmR3Load and pgmR3LoadFinalLocked.
1944 *
1945 * @returns VBox status code.
1946 *
1947 * @param pVM The VM handle.
1948 * @param pSSM The SSM handle.
1949 * @param uPass The data pass.
1950 */
1951static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1952{
1953 /*
1954 * Process page records until we hit the terminator.
1955 */
1956 RTGCPHYS GCPhys = NIL_RTGCPHYS;
1957 PPGMRAMRANGE pRamHint = NULL;
1958 uint8_t id = UINT8_MAX;
1959 uint32_t iPage = UINT32_MAX - 10;
1960 PPGMROMRANGE pRom = NULL;
1961 PPGMMMIO2RANGE pMmio2 = NULL;
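 /* Each record is a type byte, optionally followed by an address (RAM) or
    an ID + page index (ROM/MMIO2) when PGM_STATE_REC_FLAG_ADDR is set, and
    then the payload.  The variables above carry the previous record's
    position forward so flag-less records continue at the following page;
    the odd iPage start value presumably makes a first implicit-address
    record trip the range checks instead of silently hitting page 0. */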
1962 for (;;)
1963 {
1964 /*
1965 * Get the record type and flags.
1966 */
1967 uint8_t u8;
1968 int rc = SSMR3GetU8(pSSM, &u8);
1969 if (RT_FAILURE(rc))
1970 return rc;
1971 if (u8 == PGM_STATE_REC_END)
1972 return VINF_SUCCESS;
1973 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1974 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
1975 {
1976 /*
1977 * RAM page.
1978 */
1979 case PGM_STATE_REC_RAM_ZERO:
1980 case PGM_STATE_REC_RAM_RAW:
1981 {
1982 /*
1983 * Get the address and resolve it into a page descriptor.
1984 */
1985 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
1986 GCPhys += PAGE_SIZE;
1987 else
1988 {
1989 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
1990 if (RT_FAILURE(rc))
1991 return rc;
1992 }
1993 AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1994
1995 PPGMPAGE pPage;
1996 rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
1997 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
1998
1999 /*
2000 * Take action according to the record type.
2001 */
2002 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2003 {
2004 case PGM_STATE_REC_RAM_ZERO:
2005 {
2006 if (PGM_PAGE_IS_ZERO(pPage))
2007 break;
2008 /** @todo implement zero page replacing. */
2009 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
2010 void *pvDstPage;
2011 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
2012 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2013 ASMMemZeroPage(pvDstPage);
2014 break;
2015 }
2016
2017 case PGM_STATE_REC_RAM_RAW:
2018 {
2019 void *pvDstPage;
2020 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
2021 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
2022 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2023 if (RT_FAILURE(rc))
2024 return rc;
2025 break;
2026 }
2027
2028 default:
2029 AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
2030 }
2031 id = UINT8_MAX;
2032 iPage = UINT32_MAX - 10;
2033 break;
2034 }
2035
2036 /*
2037 * MMIO2 page.
2038 */
2039 case PGM_STATE_REC_MMIO2_RAW:
2040 case PGM_STATE_REC_MMIO2_ZERO:
2041 {
2042 /*
2043 * Get the ID + page number and resolve that into an MMIO2 page.
2044 */
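 /* The MMIO2 range pointer is cached across records; the list is only
    searched again when the saved-state ID changes.  Records without the
    address flag implicitly target the next page of the same range. */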
2045 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2046 iPage++;
2047 else
2048 {
2049 SSMR3GetU8(pSSM, &id);
2050 rc = SSMR3GetU32(pSSM, &iPage);
2051 if (RT_FAILURE(rc))
2052 return rc;
2053 }
2054 if ( !pMmio2
2055 || pMmio2->idSavedState != id)
2056 {
2057 for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
2058 if (pMmio2->idSavedState == id)
2059 break;
2060 AssertLogRelMsgReturn(pMmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
2061 }
2062 AssertLogRelMsgReturn(iPage < (pMmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_INTERNAL_ERROR);
2063 void *pvDstPage = (uint8_t *)pMmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);
2064
2065 /*
2066 * Load the page bits.
2067 */
2068 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
2069 ASMMemZeroPage(pvDstPage);
2070 else
2071 {
2072 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2073 if (RT_FAILURE(rc))
2074 return rc;
2075 }
2076 GCPhys = NIL_RTGCPHYS;
2077 break;
2078 }
2079
2080 /*
2081 * ROM pages.
2082 */
2083 case PGM_STATE_REC_ROM_VIRGIN:
2084 case PGM_STATE_REC_ROM_SHW_RAW:
2085 case PGM_STATE_REC_ROM_SHW_ZERO:
2086 case PGM_STATE_REC_ROM_PROT:
2087 {
2088 /*
2089 * Get the ID + page number and resolve that into a ROM page descriptor.
2090 */
2091 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
2092 iPage++;
2093 else
2094 {
2095 SSMR3GetU8(pSSM, &id);
2096 rc = SSMR3GetU32(pSSM, &iPage);
2097 if (RT_FAILURE(rc))
2098 return rc;
2099 }
2100 if ( !pRom
2101 || pRom->idSavedState != id)
2102 {
2103 for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
2104 if (pRom->idSavedState == id)
2105 break;
2106 AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
2107 }
2108 AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_INTERNAL_ERROR);
2109 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
2110 GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
2111
2112 /*
2113 * Get and set the protection.
2114 */
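 /* All ROM records start with the protection byte; for
    PGM_STATE_REC_ROM_PROT it is the entire payload.  Changing the
    protection is only valid for shadowed ROM ranges. */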
2115 uint8_t u8Prot;
2116 rc = SSMR3GetU8(pSSM, &u8Prot);
2117 if (RT_FAILURE(rc))
2118 return rc;
2119 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
2120 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_INTERNAL_ERROR);
2121
2122 if (enmProt != pRomPage->enmProt)
2123 {
2124 AssertLogRelMsgReturn(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED,
2125 ("GCPhys=%RGp enmProt=%d %s\n", GCPhys, enmProt, pRom->pszDesc),
2126 VERR_SSM_LOAD_CONFIG_MISMATCH);
2127 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
2128 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2129 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
2130 }
2131 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
2132 break; /* done */
2133
2134 /*
2135 * Get the right page descriptor.
2136 */
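 /* The copy that is currently mapped into the guest physical address
    space lives in the RAM range (pRealPage stays NULL here and is looked
    up below); the inactive copy is kept in the ROM page descriptor
    (Virgin/Shadow). */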
2137 PPGMPAGE pRealPage;
2138 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2139 {
2140 case PGM_STATE_REC_ROM_VIRGIN:
2141 if (!PGMROMPROT_IS_ROM(enmProt))
2142 pRealPage = &pRomPage->Virgin;
2143 else
2144 pRealPage = NULL;
2145 break;
2146
2147 case PGM_STATE_REC_ROM_SHW_RAW:
2148 case PGM_STATE_REC_ROM_SHW_ZERO:
2149 AssertLogRelMsgReturn(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED,
2150 ("GCPhys=%RGp enmProt=%d %s\n", GCPhys, enmProt, pRom->pszDesc),
2151 VERR_SSM_LOAD_CONFIG_MISMATCH);
2152 if (PGMROMPROT_IS_ROM(enmProt))
2153 pRealPage = &pRomPage->Shadow;
2154 else
2155 pRealPage = NULL;
2156 break;
2157
2158 default: AssertLogRelFailedReturn(VERR_INTERNAL_ERROR); /* shut up gcc */
2159 }
2160 if (!pRealPage)
2161 {
2162 rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pRealPage, &pRamHint);
2163 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
2164 }
2165
2166 /*
2167 * Make it writable and map it (if necessary).
2168 */
2169 void *pvDstPage = NULL;
2170 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2171 {
2172 case PGM_STATE_REC_ROM_SHW_ZERO:
2173 if (PGM_PAGE_IS_ZERO(pRealPage))
2174 break;
2175 /** @todo implement zero page replacing. */
2176 /* fall thru */
2177 case PGM_STATE_REC_ROM_VIRGIN:
2178 case PGM_STATE_REC_ROM_SHW_RAW:
2179 {
2180 rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
2181 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
2182 break;
2183 }
2184 }
2185
2186 /*
2187 * Load the bits.
2188 */
2189 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
2190 {
2191 case PGM_STATE_REC_ROM_SHW_ZERO:
2192 if (pvDstPage)
2193 ASMMemZeroPage(pvDstPage);
2194 break;
2195
2196 case PGM_STATE_REC_ROM_VIRGIN:
2197 case PGM_STATE_REC_ROM_SHW_RAW:
2198 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
2199 if (RT_FAILURE(rc))
2200 return rc;
2201 break;
2202 }
2203 GCPhys = NIL_RTGCPHYS;
2204 break;
2205 }
2206
2207 /*
2208 * Unknown type.
2209 */
2210 default:
2211 AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
2212 }
2213 } /* forever */
2214}
2215
2216
2217/**
2218 * Worker for pgmR3Load.
2219 *
2220 * @returns VBox status code.
2221 *
2222 * @param pVM The VM handle.
2223 * @param pSSM The SSM handle.
2224 * @param uVersion The saved state version.
2225 */
2226static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
2227{
2228 PPGM pPGM = &pVM->pgm.s;
2229 int rc;
2230 uint32_t u32Sep;
2231
2232 /*
2233 * Load basic data (required / unaffected by relocation).
2234 */
2235 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
2236 {
2237 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
2238 AssertLogRelRCReturn(rc, rc);
2239
2240 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2241 {
2242 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
2243 AssertLogRelRCReturn(rc, rc);
2244 }
2245 }
2246 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
2247 {
2248 AssertRelease(pVM->cCpus == 1);
2249
2250 PGMOLD pgmOld;
2251 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
2252 AssertLogRelRCReturn(rc, rc);
2253
2254 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
2255 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
2256 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
2257
2258 pVM->aCpus[0].pgm.s.fA20Enabled = pgmOld.fA20Enabled;
2259 pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
2260 pVM->aCpus[0].pgm.s.enmGuestMode = pgmOld.enmGuestMode;
2261 }
2262 else
2263 {
2264 AssertRelease(pVM->cCpus == 1);
2265
2266 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
2267 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
2268 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
2269
2270 uint32_t cbRamSizeIgnored;
2271 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
2272 if (RT_FAILURE(rc))
2273 return rc;
2274 SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
2275
2276 uint32_t u32 = 0;
2277 SSMR3GetUInt(pSSM, &u32);
2278 pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
2279 SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
2280 RTUINT uGuestMode;
2281 SSMR3GetUInt(pSSM, &uGuestMode);
2282 pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
2283
2284 /* check separator. */
2285 rc = SSMR3GetU32(pSSM, &u32Sep);
2286 if (RT_FAILURE(rc))
2287 return rc;
2288 if (u32Sep != (uint32_t)~0)
2289 {
2290 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
2291 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2292 }
2293 }
2294
2295 /*
2296 * The guest mappings.
2297 */
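 /* Each mapping was saved as: sequence number, description string, the
    address it was mapped at and its size in page tables.  We match it
    against the current mappings by size and description, then relocate
    it if the address changed. */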
2298 uint32_t i = 0;
2299 for (;; i++)
2300 {
2301 /* Check the sequence number / separator. */
2302 rc = SSMR3GetU32(pSSM, &u32Sep);
2303 if (RT_FAILURE(rc))
2304 return rc;
2305 if (u32Sep == ~0U)
2306 break;
2307 if (u32Sep != i)
2308 {
2309 AssertMsgFailed(("u32Sep=%#x i=%#x\n", u32Sep, i));
2310 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2311 }
2312
2313 /* get the mapping details. */
2314 char szDesc[256];
2315 szDesc[0] = '\0';
2316 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
2317 if (RT_FAILURE(rc))
2318 return rc;
2319 RTGCPTR GCPtr;
2320 SSMR3GetGCPtr(pSSM, &GCPtr);
2321 RTGCPTR cPTs;
2322 rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
2323 if (RT_FAILURE(rc))
2324 return rc;
2325
2326 /* find matching range. */
2327 PPGMMAPPING pMapping;
2328 for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
2329 if ( pMapping->cPTs == cPTs
2330 && !strcmp(pMapping->pszDesc, szDesc))
2331 break;
2332 AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
2333 cPTs, szDesc, GCPtr),
2334 VERR_SSM_LOAD_CONFIG_MISMATCH);
2335
2336 /* relocate it. */
2337 if (pMapping->GCPtr != GCPtr)
2338 {
2339 AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
2340 pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
2341 }
2342 else
2343 Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
2344 }
2345
2346 /*
2347 * Load the RAM contents.
2348 */
2349 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
2350 {
2351 if (!pVM->pgm.s.LiveSave.fActive)
2352 {
2353 rc = pgmR3LoadRomRanges(pVM, pSSM);
2354 if (RT_FAILURE(rc))
2355 return rc;
2356 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
2357 if (RT_FAILURE(rc))
2358 return rc;
2359 }
2360
2361 return pgmR3LoadMemory(pVM, pSSM, SSM_PASS_FINAL);
2362 }
2363 return pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
2364}
2365
2366
2367/**
2368 * Execute state load operation.
2369 *
2370 * @returns VBox status code.
2371 * @param pVM VM Handle.
2372 * @param pSSM SSM operation handle.
2373 * @param uVersion Data layout version.
2374 * @param uPass The data pass.
2375 */
2376static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
2377{
2378 int rc;
2379 PPGM pPGM = &pVM->pgm.s;
2380
2381 /*
2382 * Validate version.
2383 */
2384 if ( ( uPass != SSM_PASS_FINAL
2385 && uVersion != PGM_SAVED_STATE_VERSION)
2386 || ( uVersion != PGM_SAVED_STATE_VERSION
2387 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
2388 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
2389 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
2390 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
2391 )
2392 {
2393 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
2394 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
2395 }
2396
2397 /*
2398 * Do the loading while owning the lock because a bunch of the functions
2399 * we're using require it.
2400 */
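 /* Live-load structure: pass 0 reads the ROM and MMIO2 range tables and
    flags a live load in progress, later passes just stream page records,
    and the final pass (below) loads the remaining state and forces a
    full resync. */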
2401 if (uPass != SSM_PASS_FINAL)
2402 {
2403 pgmLock(pVM);
2404 if (uPass != 0)
2405 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
2406 else
2407 {
2408 pVM->pgm.s.LiveSave.fActive = true;
2409 rc = pgmR3LoadRomRanges(pVM, pSSM);
2410 if (RT_SUCCESS(rc))
2411 rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
2412 if (RT_SUCCESS(rc))
2413 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
2414 }
2415 pgmUnlock(pVM);
2416 }
2417 else
2418 {
2419 pgmLock(pVM);
2420 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
2421 pVM->pgm.s.LiveSave.fActive = false;
2422 pgmUnlock(pVM);
2423 if (RT_SUCCESS(rc))
2424 {
2425 /*
2426 * We require a full resync now.
2427 */
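 /* The shadow paging structures are not part of the saved state, so flag
    both CR3 sync types and refresh all physical handlers to have them
    rebuilt from the guest state we just loaded. */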
2428 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2429 {
2430 PVMCPU pVCpu = &pVM->aCpus[i];
2431 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2432 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2433
2434 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
2435 }
2436
2437 pgmR3HandlerPhysicalUpdateAll(pVM);
2438
2439 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2440 {
2441 PVMCPU pVCpu = &pVM->aCpus[i];
2442
2443 /*
2444 * Change the paging mode.
2445 */
2446 rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
2447
2448 /* Restore pVM->pgm.s.GCPhysCR3. */
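 /* In PAE mode CR3 only needs 32-byte alignment (it points to the PDPT),
    hence the wider mask for the PAE-based modes; a valid long-mode CR3 is
    page aligned, so the extra bits kept by the PAE mask are zero anyway. */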
2449 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
2450 RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
2451 if ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
2452 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
2453 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
2454 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
2455 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
2456 else
2457 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
2458 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2459 }
2460 }
2461 }
2462
2463 return rc;
2464}
2465
2466
2467/**
2468 * Registers the saved state callbacks with SSM.
2469 *
2470 * @returns VBox status code.
2471 * @param pVM Pointer to VM structure.
2472 * @param cbRam The RAM size.
2473 */
2474int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
2475{
2476 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
2477 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
2478 NULL, pgmR3SaveExec, pgmR3SaveDone,
2479 pgmR3LoadPrep, pgmR3Load, NULL);
2480}
2481