VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMSavedState.cpp@ 23425

Last change on this file since 23425 was 23415, checked in by vboxsync, 15 years ago

PGM: Saved state hacking and some minor cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 73.1 KB
Line 
1/* $Id: PGMSavedState.cpp 23415 2009-09-29 15:40:39Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, The Saved State Part.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include <VBox/cpum.h>
30#include <VBox/iom.h>
31#include <VBox/sup.h>
32#include <VBox/mm.h>
33#include <VBox/em.h>
34#include <VBox/stam.h>
35#include <VBox/rem.h>
36#include <VBox/selm.h>
37#include <VBox/ssm.h>
38#include <VBox/hwaccm.h>
39#include "PGMInternal.h"
40#include <VBox/vm.h>
41
42#include <VBox/dbg.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46#include <iprt/asm.h>
47#include <iprt/assert.h>
48#include <iprt/env.h>
49#include <iprt/mem.h>
50#include <iprt/file.h>
51#include <iprt/string.h>
52#include <iprt/thread.h>
53
54
55/*******************************************************************************
56* Defined Constants And Macros *
57*******************************************************************************/
58/** Saved state data unit version. */
59#ifdef VBOX_WITH_LIVE_MIGRATION
60# define PGM_SAVED_STATE_VERSION 10
61#else
62# define PGM_SAVED_STATE_VERSION 9
63#endif
64/** Saved state data unit version for 3.0. (pre live migration) */
65#define PGM_SAVED_STATE_VERSION_3_0_0 9
66/** Saved state data unit version for 2.2.2 and later. */
67#define PGM_SAVED_STATE_VERSION_2_2_2 8
68/** Saved state data unit version for 2.2.0. */
69#define PGM_SAVED_STATE_VERSION_RR_DESC 7
70/** Saved state data unit version. */
71#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE 6
72
73
74/** @name Sparse state record types
75 * @{ */
76/** Zero page. No data. */
77#define PGM_STATE_REC_ZERO UINT8_C(0x00)
78/** Raw page. */
79#define PGM_STATE_REC_RAW UINT8_C(0x01)
80/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
81#define PGM_STATE_REC_ROM_VIRGIN UINT8_C(0x02)
82/** Raw shadowed ROM page. The protection (8-bit) preceeds the raw bits. */
83#define PGM_STATE_REC_ROM_SHADOW UINT8_C(0x03)
84/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
85#define PGM_STATE_REC_ROM_SHADOW_ZERO UINT8_C(0x04)
86/** ROM protection (8-bit). */
87#define PGM_STATE_REC_ROM_PROT UINT8_C(0x05)
88/** The last record type. */
89#define PGM_STATE_REC_LAST PGM_STATE_REC_ROM_PROT
90/** End marker. */
91#define PGM_STATE_REC_END UINT8_C(0xff)
92/** Flag indicating that the data is preceeded by an RTGCPHYS containing the
93 * page address. If not set, the page follows the immediately after the
94 * previous one. */
95#define PGM_STATE_REC_FLAG_ADDR UINT8_C(0x80)
96/** @} */
97
98
99/*******************************************************************************
100* Structures and Typedefs *
101*******************************************************************************/
/** For loading old saved states. (pre-smp)
 * Mirrors the layout of the relevant PGM fields as they were saved before the
 * SMP split; loaded via s_aPGMFields_Old and then distributed to PGM/PGMCPU. */
typedef struct
{
    /** If set no conflict checks are required. (boolean) */
    bool            fMappingsFixed;
    /** Size of fixed mapping */
    uint32_t        cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and don't bother
     * anywhere else. The interesting Guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE         enmGuestMode;
} PGMOLD;
121
122
123/*******************************************************************************
124* Global Variables *
125*******************************************************************************/
/** PGM fields to save/load.  (Per-VM data, see pgmR3SaveExec.) */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};

/** Per-VCPU PGM fields to save/load.  (One instance saved per virtual CPU.) */
static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

/** Field descriptors for loading the pre-SMP PGMOLD layout (old saved states). */
static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
153
154
155/**
156 * Find the ROM tracking structure for the given page.
157 *
158 * @returns Pointer to the ROM page structure. NULL if the caller didn't check
159 * that it's a ROM page.
160 * @param pVM The VM handle.
161 * @param GCPhys The address of the ROM page.
162 */
163static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
164{
165 for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
166 pRomRange;
167 pRomRange = pRomRange->CTX_SUFF(pNext))
168 {
169 RTGCPHYS off = GCPhys - pRomRange->GCPhys;
170 if (GCPhys - pRomRange->GCPhys < pRomRange->cb)
171 return &pRomRange->aPages[off >> PAGE_SHIFT];
172 }
173 return NULL;
174}
175
176
/**
 * Prepare for a live save operation.
 *
 * This will attempt to allocate and initialize the tracking structures.  It
 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
 * pgmR3SaveDone will do the cleanups.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 * @param   pSSM    The SSM handle.  (Not used by this function.)
 */
static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Indicate that we will be using the write monitoring.
     */
    pgmLock(pVM);
    /** @todo find a way of mediating this when more users are added. */
    if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
    {
        /* Someone else already engaged write monitoring - currently unsupported. */
        pgmUnlock(pVM);
        AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
    }
    pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
    pgmUnlock(pVM);

    /*
     * Initialize the statistics.
     */
    pVM->pgm.s.LiveSave.cReadyPages = 0;
    pVM->pgm.s.LiveSave.cDirtyPages = 0;
    pVM->pgm.s.LiveSave.cMmioPages  = 0;
    pVM->pgm.s.LiveSave.fActive     = true;

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                uint32_t const cPages = pCur->cb >> PAGE_SHIFT;
                pgmUnlock(pVM);
                PPGMLIVESAVEPAGE paLSPages = (PPGMLIVESAVEPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVEPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                pgmLock(pVM);
                if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                {
                    /* The range list changed while we were unlocked; ditch the
                       allocation and restart the outer do-loop from scratch. */
                    pgmUnlock(pVM);
                    MMR3HeapFree(paLSPages);
                    pgmLock(pVM);
                    break; /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].uPassSaved = UINT32_MAX;
                    paLSPages[iPage].cDirtied = 0;
                    paLSPages[iPage].fDirty = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (PGM_PAGE_IS_ZERO(pPage))
                            {
                                paLSPages[iPage].fZero = 1;
                                paLSPages[iPage].fShared = 0;
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fShared = 1;
                            }
                            else
                            {
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fShared = 0;
                            }
                            paLSPages[iPage].fMmio = 0;
                            pVM->pgm.s.LiveSave.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                            pRomPage->LiveSave.u8Prot = (uint8_t)pRomPage->enmProt;
                            pRomPage->LiveSave.fSavedVirgin = false;
                            pRomPage->LiveSave.fDone = false;
                            /* A shadow page counts as written-to when the non-zero
                               copy is the one currently not mapped. */
                            pRomPage->LiveSave.fWrittenTo = PGMROMPROT_IS_ROM(pRomPage->enmProt)
                                                          ? !PGM_PAGE_IS_ZERO(&pRomPage->Shadow)
                                                          : !PGM_PAGE_IS_ZERO(pPage);
                            paLSPages[iPage].fZero = !pRomPage->LiveSave.fWrittenTo;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty = 1;
                            paLSPages[iPage].fMmio = 0;
                            /* Counts twice: the virgin copy (saved in pass 0 by
                               pgmR3LiveExecSaveVirginRomPages) and the shadow copy. */
                            pVM->pgm.s.LiveSave.cDirtyPages += 2;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                            /* fall thru - treat unknown types like MMIO2. */
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty = 1;
                            paLSPages[iPage].fMmio = 1;
                            pVM->pgm.s.LiveSave.cMmioPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                            paLSPages[iPage].fZero = 1;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty = 1;
                            paLSPages[iPage].fMmio = 1;
                            pVM->pgm.s.LiveSave.cMmioPages++;
                            break;
                    }
                }
            }
        }
    } while (pCur); /* pCur != NULL means a break-for-retry, not completion. */
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
328
329
/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @returns VBox status code (error from pgmPhysPageMap or SSMR3PutMem).
 * @param   pVM     The VM handle.
 * @param   pSSM    The SSM handle.
 */
static int pgmR3LiveExecSaveVirginRomPages(PVM pVM, PSSMHANDLE pSSM)
{
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor.  When the ROM is mapped the virgin
               copy is the active page; otherwise it lives in the ROM range. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[PAGE_SIZE];
            if (!PGM_PAGE_IS_ZERO(pPage))
            {
                void *pvPage;
                PPGMPAGEMAP pMapIgnored;
                rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pMapIgnored, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, PAGE_SIZE);
            }
            else
                ASMMemZeroPage(abPage);
            /* Drop the lock before talking to SSM - the Put calls may block. */
            pgmUnlock(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it.  The address is only emitted when this page is not
               contiguous with the previously saved one. */
            if (GCPhys == GCPhysLast + PAGE_SIZE)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutGCPhys(pSSM, GCPhys);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state.  (Lock was dropped above; re-take it.) */
            pgmLock(pVM);
            GCPhysLast = GCPhys;
            pRom->aPages[iPage].LiveSave.fSavedVirgin = true;
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            pVM->pgm.s.LiveSave.cDirtyPages--;
            pVM->pgm.s.LiveSave.cReadyPages++;

            /* If the page cannot be shadowed, mark it as done. */
            PPGMRAMRANGE pRam;
            pPage = pgmPhysGetPageAndRange(&pVM->pgm.s, GCPhys, &pRam);
            PPGMLIVESAVEPAGE pLSPage = &pRam->paLSPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
            if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
            {
                pLSPage->fDirty = 0;
                pRom->aPages[iPage].LiveSave.fWrittenTo = false;
                pRom->aPages[iPage].LiveSave.fDone = true;
                /* Retire the second dirty count added for this page in pgmR3LivePrep. */
                pVM->pgm.s.LiveSave.cDirtyPages--;
                pVM->pgm.s.LiveSave.cReadyPages++;
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
413
414
/**
 * pgmR3LiveExec part 1: Scan for page modifications and reprotect them.
 *
 * Walks all RAM ranges, re-arming write monitoring on written-to RAM pages
 * and updating the per-page live-save tracking state (dirty/zero/shared).
 *
 * Note! Since we don't care about MMIO or MMIO2 pages and since we don't
 *       have any movable ROMs yet, we can safely yield the PGM when we
 *       detect contention.
 *
 *       This holds true for part 2 as well.
 *
 * @param   pVM     The VM handle.
 */
static void pgmR3LiveExecPart1(PVM pVM)
{
    RTGCPHYS GCPhysCur = 0;     /* resume point after a yield/range-list change */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        uint32_t cSinceYield = 0;
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->GCPhysLast > GCPhysCur)
            {
                PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
                uint32_t cPages = pCur->cb >> PAGE_SHIFT;
                uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++, cSinceYield++)
                {
                    /* Do yield first.  Every 2048 pages, offer the lock to
                       waiters; restart only if the range list changed. */
                    if (   (cSinceYield & 0x7ff) == 0x7ff
                        && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
                        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /* Process the page. */
                    if (paLSPages[iPage].fMmio)
                        continue;
                    switch (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]))
                    {
                        case PGMPAGETYPE_RAM:
                        {
                            switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                            {
                                case PGM_PAGE_STATE_ALLOCATED:
                                    /** @todo Optimize this: Don't always re-enable write
                                     * monitoring if the page is known to be very busy. */
                                    if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                    {
                                        Assert(paLSPages[iPage].fWriteMonitored);
                                        PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
                                        Assert(pVM->pgm.s.cWrittenToPages > 0);
                                        pVM->pgm.s.cWrittenToPages--;
                                    }
                                    else
                                    {
                                        /* First time we see this page allocated. */
                                        Assert(!paLSPages[iPage].fWriteMonitored);
                                        pVM->pgm.s.LiveSave.cMonitoredPages++;
                                    }

                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.cDirtyPages++;
                                        pVM->pgm.s.LiveSave.cReadyPages--;
                                        /* Saturating dirty counter. */
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }

                                    /* Re-arm write monitoring so further writes are noticed. */
                                    PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
                                    pVM->pgm.s.cMonitoredPages++;
                                    paLSPages[iPage].fWriteMonitored = 1;
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                    paLSPages[iPage].fDirty = 1;
                                    paLSPages[iPage].fZero = 0;
                                    paLSPages[iPage].fShared = 0;
                                    break;

                                case PGM_PAGE_STATE_WRITE_MONITORED:
                                    /* Untouched since last scan; eligible for saving in part 2. */
                                    Assert(paLSPages[iPage].fWriteMonitored);
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                    break;

                                case PGM_PAGE_STATE_ZERO:
                                    if (!paLSPages[iPage].fZero)
                                    {
                                        /* Transitioned to zero; mark dirty so the
                                           change gets saved. */
                                        paLSPages[iPage].fZero = 1;
                                        paLSPages[iPage].fShared = 0;
                                        if (!paLSPages[iPage].fDirty)
                                        {
                                            paLSPages[iPage].fDirty = 1;
                                            pVM->pgm.s.LiveSave.cReadyPages--;
                                            pVM->pgm.s.LiveSave.cDirtyPages++;
                                        }
                                    }
                                    break;

                                case PGM_PAGE_STATE_SHARED:
                                    if (!paLSPages[iPage].fShared)
                                    {
                                        paLSPages[iPage].fZero = 0;
                                        paLSPages[iPage].fShared = 1;
                                        if (!paLSPages[iPage].fDirty)
                                        {
                                            paLSPages[iPage].fDirty = 1;
                                            pVM->pgm.s.LiveSave.cReadyPages--;
                                            pVM->pgm.s.LiveSave.cDirtyPages++;
                                        }
                                    }
                                    break;
                            }
                            break;
                        }

                        /* All writes to the shadow page are intercepted. */
                        case PGMPAGETYPE_ROM_SHADOW: /* (The shadow page is active.) */
                        case PGMPAGETYPE_ROM: /* (The virgin page is active.) */
                        {
                            PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                            if (!pRomPage->LiveSave.fDone)
                            {
                                if (pRomPage->LiveSave.fWrittenTo)
                                {
                                    pRomPage->LiveSave.fWrittenTo = false;
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.cReadyPages--;
                                        pVM->pgm.s.LiveSave.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                }
                                else
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                paLSPages[iPage].fWriteMonitored = 1;
                            }
                            else
                            {
                                /* Non-shadowable ROM finished in pass 0. */
                                Assert(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM);
                                Assert(!paLSPages[iPage].fDirty);
                            }
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage]));
                            /* fall thru - demote unknown types to MMIO handling. */
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                        case PGMPAGETYPE_MMIO:
                            /* NOTE(review): the fZero assignments here are the reverse of
                               pgmR3LivePrep's (there MMIO gets fZero=1, MMIO2 gets fZero=0)
                               - verify which orientation is intended. */
                            if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_MMIO)
                            {
                                paLSPages[iPage].fZero = 0;
                                paLSPages[iPage].fDirty = 1;
                                paLSPages[iPage].fMmio = 1;
                            }
                            else
                            {
                                paLSPages[iPage].fZero = 1;
                                paLSPages[iPage].fDirty = 1;
                                paLSPages[iPage].fMmio = 1;
                            }
                            if (paLSPages[iPage].fWriteMonitored)
                            {
                                /* Page changed type while we were monitoring it; undo
                                   the monitoring bookkeeping. */
                                if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                                {
                                    AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                    PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                    Assert(pVM->pgm.s.cMonitoredPages > 0);
                                    pVM->pgm.s.cMonitoredPages--;
                                }
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                pVM->pgm.s.LiveSave.cMonitoredPages--;
                            }
                            pVM->pgm.s.LiveSave.cMmioPages++;
                            break;
                    } /* switch on page type */
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur); /* non-NULL means restart after yield/range change */
    pgmUnlock(pVM);
}
609
610
/**
 * pgmR3LiveExec part 2: Save quiescent pages.
 *
 * Saves every tracked page that is still dirty but was not written to since
 * the part-1 scan (fWriteMonitoredJustNow clear), dropping the PGM lock
 * around each SSM write.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pSSM    The SSM handle.
 * @param   uPass   The pass.  (Not used by the current implementation.)
 */
static int pgmR3LiveExecPart2(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;  /* for run-length (addressless) records */
    RTGCPHYS GCPhysCur = 0;              /* resume point after a yield/range change */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        uint32_t cSinceYield = 0;
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->GCPhysLast > GCPhysCur)
            {
                PPGMLIVESAVEPAGE paLSPages = pCur->paLSPages;
                uint32_t cPages = pCur->cb >> PAGE_SHIFT;
                uint32_t iPage = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++, cSinceYield++)
                {
                    /* Do yield first.  Every 2048 pages, offer the lock to
                       waiters; restart only if the range list changed. */
                    if (   (cSinceYield & 0x7ff) == 0x7ff
                        && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
                        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /*
                     * Save dirty pages that haven't changed since part 1.
                     * (Use if instead of switch here so we can easily break out of the loop.)
                     */
                    if (   paLSPages[iPage].fDirty
                        && !paLSPages[iPage].fMmio
                        && !paLSPages[iPage].fWriteMonitoredJustNow
                        && (   (   PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM
                                && PGM_PAGE_GET_STATE(&pCur->aPages[iPage])
                                   == (  paLSPages[iPage].fZero
                                       ? PGM_PAGE_STATE_ZERO
                                       : paLSPages[iPage].fShared
                                       ? PGM_PAGE_STATE_SHARED
                                       : PGM_PAGE_STATE_WRITE_MONITORED)
                               )
                            || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM
                            || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM_SHADOW
                           )
                       )
                    {
                        int rc;
                        char abPage[PAGE_SIZE];
                        RTGCPHYS GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);

                        if (   PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM
                            && PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) != PGM_PAGE_STATE_ZERO)
                        {
                            /*
                             * Copy the page and then save it outside the lock (since any
                             * SSM call may block).
                             */
                            void const *pvPage;
                            rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
                            if (RT_SUCCESS(rc))
                                memcpy(abPage, pvPage, PAGE_SIZE);
                            pgmUnlock(pVM);
                            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                            if (GCPhys == GCPhysLast + PAGE_SIZE)
                                SSMR3PutU8(pSSM, PGM_STATE_REC_RAW);
                            else
                            {
                                SSMR3PutU8(pSSM, PGM_STATE_REC_RAW | PGM_STATE_REC_FLAG_ADDR);
                                SSMR3PutGCPhys(pSSM, GCPhys);
                            }
                            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                        }
                        else if (   PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM
                                 && PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_ZERO)
                        {
                            /*
                             * Dirty zero page.
                             *
                             * NOTE(review): this emits PGM_STATE_REC_RAW with no page data
                             * following - PGM_STATE_REC_ZERO looks intended; verify against
                             * the load side before relying on this path.
                             */
                            pgmUnlock(pVM);

                            if (GCPhys == GCPhysLast + PAGE_SIZE)
                                rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAW);
                            else
                            {
                                SSMR3PutU8(pSSM, PGM_STATE_REC_RAW | PGM_STATE_REC_FLAG_ADDR);
                                rc = SSMR3PutGCPhys(pSSM, GCPhys);
                            }
                        }
                        else
                        {
                            /*
                             * Dirty shadow ROM page.
                             */
                            Assert(   PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM
                                   || PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_ROM_SHADOW);
                            PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                            if (pRomPage->LiveSave.fWrittenTo)
                                continue; /* modified already, skip it. */

                            PGMROMPROT enmProt = pRomPage->enmProt;
                            /* The passive (not currently mapped) copy is the one to save. */
                            PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pCur->aPages[iPage];
                            bool fZero = PGM_PAGE_IS_ZERO(pPage);
                            /* NOTE(review): rc is left uninitialized when fZero is true, yet
                               AssertLogRelMsgRCReturn below reads it - it should be set to
                               VINF_SUCCESS before this branch; confirm and fix. */
                            if (!fZero)
                            {
                                void *pvPage;
                                PPGMPAGEMAP pMapIgnored;
                                rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pMapIgnored, &pvPage);
                                if (RT_SUCCESS(rc))
                                    memcpy(abPage, pvPage, PAGE_SIZE);
                            }
                            pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                            pgmUnlock(pVM);
                            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                            if (GCPhys == GCPhysLast + PAGE_SIZE)
                                SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHADOW_ZERO : PGM_STATE_REC_ROM_SHADOW));
                            else
                            {
                                SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHADOW_ZERO : PGM_STATE_REC_ROM_SHADOW) | PGM_STATE_REC_FLAG_ADDR);
                                SSMR3PutGCPhys(pSSM, GCPhys);
                            }
                            rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                            if (!fZero)
                                rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                        }

                        /* common tail */
                        if (RT_FAILURE(rc))
                            return rc;

                        pgmLock(pVM);
                        GCPhysLast = GCPhys;
                        paLSPages[iPage].fDirty = 0;
                        if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                        {
                            /* Range list changed while the lock was dropped; resume
                               right after this page. */
                            GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
                            break; /* restart */
                        }
                    }
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur); /* non-NULL means restart after yield/range change */
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
774
775
776/**
777 * Execute a live save pass.
778 *
779 * @returns VBox status code.
780 *
781 * @param pVM The VM handle.
782 * @param pSSM The SSM handle.
783 */
784static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
785{
786 if (uPass == 0)
787 {
788 int rc = pgmR3LiveExecSaveVirginRomPages(pVM, pSSM);
789 if (RT_FAILURE(rc))
790 return rc;
791 }
792 pgmR3LiveExecPart1(pVM);
793 pgmR3PoolClearAll(pVM); /** @todo this could perhaps be optimized a bit. */
794 return pgmR3LiveExecPart2(pVM, pSSM, uPass);
795}
796
797
798/**
799 * Votes on whether the live save phase is done or not.
800 *
801 * @returns VBox status code.
802 *
803 * @param pVM The VM handle.
804 * @param pSSM The SSM handle.
805 */
806static DECLCALLBACK(int) pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM)
807{
808 return VINF_SUCCESS;
809}
810
811#ifdef VBOX_WITH_LIVE_MIGRATION
812
/**
 * Worker for pgmR3SaveExec that saves the memory.
 *
 * NOTE(review): unimplemented skeleton - the final-pass memory saving for the
 * live migration path has not been written yet; this currently saves nothing
 * and reports success.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   fLiveSave   Whether we're in a live save or not.
 *
 */
static int pgmR3SaveExecMemory(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    /*
     * Save all ROM pages.
     */

    /*
     * Save all MMIO2 pages (including unmapped ones).
     */

    /*
     * Save all normal RAM pages.
     */
    return VINF_SUCCESS;
}
838
839#else /* !VBOX_WITH_LIVE_MIGRATION */
840
841/**
842 * Save zero indicator + bits for the specified page.
843 *
844 * @returns VBox status code, errors are logged/asserted before returning.
845 * @param pVM The VM handle.
846 * @param pSSH The saved state handle.
847 * @param pPage The page to save.
848 * @param GCPhys The address of the page.
849 * @param pRam The ram range (for error logging).
850 */
851static int pgmR3SavePage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
852{
853 int rc;
854 if (PGM_PAGE_IS_ZERO(pPage))
855 rc = SSMR3PutU8(pSSM, 0);
856 else
857 {
858 void const *pvPage;
859 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvPage);
860 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
861
862 SSMR3PutU8(pSSM, 1);
863 rc = SSMR3PutMem(pSSM, pvPage, PAGE_SIZE);
864 }
865 return rc;
866}
867
868
869/**
870 * Save a shadowed ROM page.
871 *
872 * Format: Type, protection, and two pages with zero indicators.
873 *
874 * @returns VBox status code, errors are logged/asserted before returning.
875 * @param pVM The VM handle.
876 * @param pSSH The saved state handle.
877 * @param pPage The page to save.
878 * @param GCPhys The address of the page.
879 * @param pRam The ram range (for error logging).
880 */
881static int pgmR3SaveShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
882{
883 /* Need to save both pages and the current state. */
884 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
885 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);
886
887 SSMR3PutU8(pSSM, PGMPAGETYPE_ROM_SHADOW);
888 SSMR3PutU8(pSSM, pRomPage->enmProt);
889
890 int rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhys, pRam);
891 if (RT_SUCCESS(rc))
892 {
893 PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
894 rc = pgmR3SavePage(pVM, pSSM, pPagePassive, GCPhys, pRam);
895 }
896 return rc;
897}
898
899#endif /* !VBOX_WITH_LIVE_MIGRATION */
900
/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
{
    int rc;
    unsigned i;
    PPGM pPGM = &pVM->pgm.s;

    /*
     * Lock PGM and set the no-more-writes indicator.
     */
    pgmLock(pVM);
    pVM->pgm.s.fNoMorePhysWrites = true;

    /*
     * Save basic data (required / unaffected by relocation).
     */
    SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);

    /* Per-VCPU paging state. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        SSMR3PutStruct(pSSM, &pVCpu->pgm.s, &s_aPGMCpuFields[0]);
    }

    /*
     * The guest mappings.
     */
    i = 0;
    for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
    {
        SSMR3PutU32( pSSM, i);
        SSMR3PutStrZ( pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
        SSMR3PutGCPtr( pSSM, pMapping->GCPtr);
        SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
    }
    rc = SSMR3PutU32(pSSM, ~0); /* terminator. */

#ifdef VBOX_WITH_LIVE_MIGRATION
    /*
     * Save the (remainder of the) memory.
     */
    if (RT_SUCCESS(rc))
    {
        /* Re-run the part-1 scan so the final pass picks up latecomers. */
        if (pVM->pgm.s.LiveSave.fActive)
            pgmR3LiveExecPart1(pVM);
        rc = pgmR3SaveExecMemory(pVM, pSSM, pVM->pgm.s.LiveSave.fActive);
    }

#else /* !VBOX_WITH_LIVE_MIGRATION */
    /*
     * Ram ranges and the memory they describe.
     */
    i = 0;
    for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; pRam; pRam = pRam->pNextR3, i++)
    {
        /*
         * Save the ram range details.
         */
        SSMR3PutU32(pSSM, i);
        SSMR3PutGCPhys(pSSM, pRam->GCPhys);
        SSMR3PutGCPhys(pSSM, pRam->GCPhysLast);
        SSMR3PutGCPhys(pSSM, pRam->cb);
        SSMR3PutU8(pSSM, !!pRam->pvR3); /* Boolean indicating memory or not. */
        SSMR3PutStrZ(pSSM, pRam->pszDesc); /* This is the best unique id we have... */

        /*
         * Iterate the pages, only two special case.
         */
        uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PPGMPAGE pPage = &pRam->aPages[iPage];
            uint8_t uType = PGM_PAGE_GET_TYPE(pPage);

            if (uType == PGMPAGETYPE_ROM_SHADOW) /** @todo This isn't right, but it doesn't currently matter. */
                rc = pgmR3SaveShadowedRomPage(pVM, pSSM, pPage, GCPhysPage, pRam);
            else if (uType == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
            {
                /* MMIO2 alias -> MMIO; the device will just have to deal with this. */
                SSMR3PutU8(pSSM, PGMPAGETYPE_MMIO);
                rc = SSMR3PutU8(pSSM, 0 /* ZERO */);
            }
            else
            {
                /* Normal case: type byte followed by zero-indicator + bits. */
                SSMR3PutU8(pSSM, uType);
                rc = pgmR3SavePage(pVM, pSSM, pPage, GCPhysPage, pRam);
            }
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_FAILURE(rc))
            break;
    }

    rc = SSMR3PutU32(pSSM, ~0); /* terminator. */
#endif /* !VBOX_WITH_LIVE_MIGRATION */

    pgmUnlock(pVM);
    return rc;
}
1008
1009
/**
 * Cleans up after an save state operation.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.  (Not used by this function.)
 */
static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Free the tracking arrays and disable write monitoring.
     *
     * Play nice with the PGM lock in case we're called while the VM is still
     * running.  This means we have to delay the freeing since we wish to use
     * paLSPages as an indicator of which RAM ranges which we need to scan for
     * write monitored pages.
     */
    void *pvToFree = NULL;
    PPGMRAMRANGE pCur;
    uint32_t cMonitoredPages = 0;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->paLSPages)
            {
                /* Free the previous range's array outside the lock; if the
                   range list changed meanwhile, rescan from the start. */
                if (pvToFree)
                {
                    uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                    pgmUnlock(pVM);
                    MMR3HeapFree(pvToFree);
                    pvToFree = NULL;
                    pgmLock(pVM);
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                        break; /* start over again. */
                }

                /* Detach now, free on the next lap (or after the loop). */
                pvToFree = pCur->paLSPages;
                pCur->paLSPages = NULL;

                /* Disarm write monitoring for every page in the range. */
                uint32_t iPage = pCur->cb >> PAGE_SHIFT;
                while (iPage--)
                {
                    PPGMPAGE pPage = &pCur->aPages[iPage];
                    PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
                    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
                    {
                        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
                        cMonitoredPages++;
                    }
                }
            }
        }
    } while (pCur); /* non-NULL means restart after a range-list change */

    /* NOTE(review): cMonitoredPages is accumulated above but never subtracted
       from the global monitored-page counter - verify whether pVM->pgm.s
       bookkeeping should be adjusted here. */

    pVM->pgm.s.LiveSave.fActive = false;

    /** @todo this is blindly assuming that we're the only user of write
     *        monitoring. Fix this when more users are added. */
    pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
    pgmUnlock(pVM);

    /* Free the last detached array (MMR3HeapFree handles NULL). */
    MMR3HeapFree(pvToFree);
    pvToFree = NULL;

    return VINF_SUCCESS;
}
1078
1079
1080/**
1081 * Prepare state load operation.
1082 *
1083 * @returns VBox status code.
1084 * @param pVM VM Handle.
1085 * @param pSSM SSM operation handle.
1086 */
1087static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
1088{
1089 /*
1090 * Call the reset function to make sure all the memory is cleared.
1091 */
1092 PGMR3Reset(pVM);
1093 NOREF(pSSM);
1094 return VINF_SUCCESS;
1095}
1096
1097
1098/**
1099 * Load an ignored page.
1100 *
1101 * @returns VBox status code.
1102 * @param pSSM The saved state handle.
1103 */
1104static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
1105{
1106 uint8_t abPage[PAGE_SIZE];
1107 return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
1108}
1109
1110
1111/**
1112 * Loads a page without any bits in the saved state, i.e. making sure it's
1113 * really zero.
1114 *
1115 * @returns VBox status code.
1116 * @param pVM The VM handle.
1117 * @param uType The page type or PGMPAGETYPE_INVALID (old saved
1118 * state).
1119 * @param pPage The guest page tracking structure.
1120 * @param GCPhys The page address.
1121 * @param pRam The ram range (logging).
1122 */
1123static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1124{
1125 if ( PGM_PAGE_GET_TYPE(pPage) != uType
1126 && uType != PGMPAGETYPE_INVALID)
1127 return VERR_SSM_UNEXPECTED_DATA;
1128
1129 /* I think this should be sufficient. */
1130 if (!PGM_PAGE_IS_ZERO(pPage))
1131 return VERR_SSM_UNEXPECTED_DATA;
1132
1133 NOREF(pVM);
1134 NOREF(GCPhys);
1135 NOREF(pRam);
1136 return VINF_SUCCESS;
1137}
1138
1139
1140/**
1141 * Loads a page from the saved state.
1142 *
1143 * @returns VBox status code.
1144 * @param pVM The VM handle.
1145 * @param pSSM The SSM handle.
1146 * @param uType The page type or PGMPAGETYEP_INVALID (old saved
1147 * state).
1148 * @param pPage The guest page tracking structure.
1149 * @param GCPhys The page address.
1150 * @param pRam The ram range (logging).
1151 */
1152static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1153{
1154 int rc;
1155
1156 /*
1157 * Match up the type, dealing with MMIO2 aliases (dropped).
1158 */
1159 AssertLogRelMsgReturn( PGM_PAGE_GET_TYPE(pPage) == uType
1160 || uType == PGMPAGETYPE_INVALID,
1161 ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
1162 VERR_SSM_UNEXPECTED_DATA);
1163
1164 /*
1165 * Load the page.
1166 */
1167 void *pvPage;
1168 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
1169 if (RT_SUCCESS(rc))
1170 rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);
1171
1172 return rc;
1173}
1174
1175
1176/**
1177 * Loads a page (counter part to pgmR3SavePage).
1178 *
1179 * @returns VBox status code, fully bitched errors.
1180 * @param pVM The VM handle.
1181 * @param pSSM The SSM handle.
1182 * @param uType The page type.
1183 * @param pPage The page.
1184 * @param GCPhys The page address.
1185 * @param pRam The RAM range (for error messages).
1186 */
1187static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
1188{
1189 uint8_t uState;
1190 int rc = SSMR3GetU8(pSSM, &uState);
1191 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
1192 if (uState == 0 /* zero */)
1193 rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
1194 else if (uState == 1)
1195 rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
1196 else
1197 rc = VERR_INTERNAL_ERROR;
1198 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
1199 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
1200 rc);
1201 return VINF_SUCCESS;
1202}
1203
1204
/**
 * Loads a shadowed ROM page.
 *
 * @returns VBox status code, errors are fully bitched.
 * @param   pVM     The VM handle.
 * @param   pSSM    The saved state handle.
 * @param   pPage   The page.
 * @param   GCPhys  The page address.
 * @param   pRam    The RAM range (for error messages).
 */
static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /*
     * Load and set the protection first, then load the two pages, the first
     * one is the active the other is the passive.
     */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);

    /* Read and validate the saved protection mode. */
    uint8_t uProt;
    int rc = SSMR3GetU8(pSSM, &uProt);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
    PGMROMPROT enmProt = (PGMROMPROT)uProt;
    AssertLogRelMsgReturn(    enmProt >= PGMROMPROT_INVALID
                          && enmProt < PGMROMPROT_END,
                          ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    /* Apply the protection before loading bits, so the active/passive mapping is right. */
    if (pRomPage->enmProt != enmProt)
    {
        rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
    }

    /* Which of virgin/shadow is currently mapped (active) depends on the protection. */
    PPGMPAGE pPageActive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
    PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
    uint8_t u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM : PGMPAGETYPE_ROM_SHADOW;
    uint8_t u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;

    /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
     *        used down the line (will the 2nd page will be written to the first
     *        one because of a false TLB hit since the TLB is using GCPhys and
     *        doesn't check the HCPhys of the desired page). */
    rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
    if (RT_SUCCESS(rc))
    {
        /* Mirror the active page descriptor into the ROM tracking structure
           before loading the passive copy. */
        *pPageActive = *pPage;
        rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
    }
    return rc;
}
1257
1258/**
1259 * Ram range flags and bits for older versions of the saved state.
1260 *
1261 * @returns VBox status code.
1262 *
1263 * @param pVM The VM handle
1264 * @param pSSM The SSM handle.
1265 * @param uVersion The saved state version.
1266 */
1267static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
1268{
1269 PPGM pPGM = &pVM->pgm.s;
1270
1271 /*
1272 * Ram range flags and bits.
1273 */
1274 uint32_t i = 0;
1275 for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
1276 {
1277 /* Check the seqence number / separator. */
1278 uint32_t u32Sep;
1279 int rc = SSMR3GetU32(pSSM, &u32Sep);
1280 if (RT_FAILURE(rc))
1281 return rc;
1282 if (u32Sep == ~0U)
1283 break;
1284 if (u32Sep != i)
1285 {
1286 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1287 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1288 }
1289 AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1290
1291 /* Get the range details. */
1292 RTGCPHYS GCPhys;
1293 SSMR3GetGCPhys(pSSM, &GCPhys);
1294 RTGCPHYS GCPhysLast;
1295 SSMR3GetGCPhys(pSSM, &GCPhysLast);
1296 RTGCPHYS cb;
1297 SSMR3GetGCPhys(pSSM, &cb);
1298 uint8_t fHaveBits;
1299 rc = SSMR3GetU8(pSSM, &fHaveBits);
1300 if (RT_FAILURE(rc))
1301 return rc;
1302 if (fHaveBits & ~1)
1303 {
1304 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1305 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1306 }
1307 size_t cchDesc = 0;
1308 char szDesc[256];
1309 szDesc[0] = '\0';
1310 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
1311 {
1312 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
1313 if (RT_FAILURE(rc))
1314 return rc;
1315 /* Since we've modified the description strings in r45878, only compare
1316 them if the saved state is more recent. */
1317 if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
1318 cchDesc = strlen(szDesc);
1319 }
1320
1321 /*
1322 * Match it up with the current range.
1323 *
1324 * Note there is a hack for dealing with the high BIOS mapping
1325 * in the old saved state format, this means we might not have
1326 * a 1:1 match on success.
1327 */
1328 if ( ( GCPhys != pRam->GCPhys
1329 || GCPhysLast != pRam->GCPhysLast
1330 || cb != pRam->cb
1331 || ( cchDesc
1332 && strcmp(szDesc, pRam->pszDesc)) )
1333 /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
1334 && ( uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
1335 || GCPhys != UINT32_C(0xfff80000)
1336 || GCPhysLast != UINT32_C(0xffffffff)
1337 || pRam->GCPhysLast != GCPhysLast
1338 || pRam->GCPhys < GCPhys
1339 || !fHaveBits)
1340 )
1341 {
1342 LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
1343 "State : %RGp-%RGp %RGp bytes %s %s\n",
1344 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
1345 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
1346 /*
1347 * If we're loading a state for debugging purpose, don't make a fuss if
1348 * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
1349 */
1350 if ( SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
1351 || GCPhys < 8 * _1M)
1352 AssertFailedReturn(VERR_SSM_LOAD_CONFIG_MISMATCH);
1353
1354 AssertMsgFailed(("debug skipping not implemented, sorry\n"));
1355 continue;
1356 }
1357
1358 uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
1359 if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
1360 {
1361 /*
1362 * Load the pages one by one.
1363 */
1364 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1365 {
1366 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
1367 PPGMPAGE pPage = &pRam->aPages[iPage];
1368 uint8_t uType;
1369 rc = SSMR3GetU8(pSSM, &uType);
1370 AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
1371 if (uType == PGMPAGETYPE_ROM_SHADOW)
1372 rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
1373 else
1374 rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
1375 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
1376 }
1377 }
1378 else
1379 {
1380 /*
1381 * Old format.
1382 */
1383 AssertLogRelReturn(!pVM->pgm.s.fRamPreAlloc, VERR_NOT_SUPPORTED); /* can't be detected. */
1384
1385 /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
1386 The rest is generally irrelevant and wrong since the stuff have to match registrations. */
1387 uint32_t fFlags = 0;
1388 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1389 {
1390 uint16_t u16Flags;
1391 rc = SSMR3GetU16(pSSM, &u16Flags);
1392 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
1393 fFlags |= u16Flags;
1394 }
1395
1396 /* Load the bits */
1397 if ( !fHaveBits
1398 && GCPhysLast < UINT32_C(0xe0000000))
1399 {
1400 /*
1401 * Dynamic chunks.
1402 */
1403 const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
1404 AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
1405 ("cPages=%#x cPagesInChunk=%#x\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
1406 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1407
1408 for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
1409 {
1410 uint8_t fPresent;
1411 rc = SSMR3GetU8(pSSM, &fPresent);
1412 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
1413 AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
1414 ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
1415 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1416
1417 for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
1418 {
1419 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
1420 PPGMPAGE pPage = &pRam->aPages[iPage];
1421 if (fPresent)
1422 {
1423 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
1424 rc = pgmR3LoadPageToDevNullOld(pSSM);
1425 else
1426 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
1427 }
1428 else
1429 rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
1430 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
1431 }
1432 }
1433 }
1434 else if (pRam->pvR3)
1435 {
1436 /*
1437 * MMIO2.
1438 */
1439 AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
1440 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
1441 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1442 AssertLogRelMsgReturn(pRam->pvR3,
1443 ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
1444 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1445
1446 rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
1447 AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
1448 }
1449 else if (GCPhysLast < UINT32_C(0xfff80000))
1450 {
1451 /*
1452 * PCI MMIO, no pages saved.
1453 */
1454 }
1455 else
1456 {
1457 /*
1458 * Load the 0xfff80000..0xffffffff BIOS range.
1459 * It starts with X reserved pages that we have to skip over since
1460 * the RAMRANGE create by the new code won't include those.
1461 */
1462 AssertLogRelMsgReturn( !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
1463 && (fFlags & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
1464 ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
1465 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1466 AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
1467 ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
1468 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1469
1470 /* Skip wasted reserved pages before the ROM. */
1471 while (GCPhys < pRam->GCPhys)
1472 {
1473 rc = pgmR3LoadPageToDevNullOld(pSSM);
1474 GCPhys += PAGE_SIZE;
1475 }
1476
1477 /* Load the bios pages. */
1478 cPages = pRam->cb >> PAGE_SHIFT;
1479 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1480 {
1481 RTGCPHYS const GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
1482 PPGMPAGE pPage = &pRam->aPages[iPage];
1483
1484 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
1485 ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
1486 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1487 rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
1488 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
1489 }
1490 }
1491 }
1492 }
1493
1494 return VINF_SUCCESS;
1495}
1496
1497
1498/**
1499 * Worker for pgmR3Load and pgmR3LoadLocked.
1500 *
1501 * @returns VBox status code.
1502 *
1503 * @param pVM The VM handle.
1504 * @param pSSM The SSM handle.
1505 * @param uVersion The saved state version.
1506 */
1507static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
1508{
1509 /*
1510 * Process page records until we hit the terminator.
1511 */
1512 PPGMRAMRANGE pRamHint = NULL;
1513 RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
1514 for (;;)
1515 {
1516 /* Get the record type and flags. */
1517 uint8_t u8;
1518 int rc = SSMR3GetU8(pSSM, &u8);
1519 if (RT_FAILURE(rc))
1520 return rc;
1521 if (u8 == PGM_STATE_REC_END)
1522 return VINF_SUCCESS;
1523 AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1524
1525 /* Get the address. */
1526 RTGCPHYS GCPhys;
1527 if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
1528 {
1529 AssertLogRelReturn(GCPhysLast != NIL_RTGCPHYS, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1530 GCPhys = GCPhysLast + PAGE_SIZE;
1531 }
1532 else
1533 {
1534 rc = SSMR3GetGCPhys(pSSM, &GCPhys);
1535 if (RT_FAILURE(rc))
1536 return rc;
1537 AssertLogRelMsgReturn(GCPhys & PAGE_OFFSET_MASK, ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1538 }
1539
1540 /* Get the ram range and page. */
1541 PPGMPAGE pPage;
1542 rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
1543 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
1544
1545 /*
1546 * Take action according to the record type.
1547 */
1548 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
1549 {
1550 case PGM_STATE_REC_ZERO:
1551 {
1552 if (PGM_PAGE_IS_ZERO(pPage))
1553 break;
1554 /** @todo implement zero page replacing. */
1555 AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
1556 void *pvDstPage;
1557 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
1558 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
1559 ASMMemZeroPage(pvDstPage);
1560 break;
1561 }
1562
1563 case PGM_STATE_REC_RAW:
1564 {
1565 void *pvDstPage;
1566 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
1567 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
1568 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
1569 if (RT_FAILURE(rc))
1570 return rc;
1571 break;
1572 }
1573
1574 case PGM_STATE_REC_ROM_VIRGIN:
1575 case PGM_STATE_REC_ROM_SHADOW:
1576 case PGM_STATE_REC_ROM_SHADOW_ZERO:
1577 case PGM_STATE_REC_ROM_PROT:
1578 {
1579 PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
1580 AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp\n", GCPhys), VERR_INTERNAL_ERROR);
1581
1582 uint8_t u8Prot;
1583 rc = SSMR3GetU8(pSSM, &u8Prot);
1584 if (RT_FAILURE(rc))
1585 return rc;
1586 PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
1587 AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_INTERNAL_ERROR);
1588
1589 /* Make the protection change. */
1590 if (enmProt != pRomPage->enmProt)
1591 {
1592 rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
1593 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
1594 AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
1595 }
1596 if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
1597 break; /* done */
1598
1599 /* Get the right page descriptor. */
1600 PPGMPAGE pRealPage;
1601 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
1602 {
1603 case PGM_STATE_REC_ROM_VIRGIN:
1604 if (!PGMROMPROT_IS_ROM(enmProt))
1605 pRealPage = &pRomPage->Virgin;
1606 else
1607 pRealPage = pPage;
1608 break;
1609
1610 case PGM_STATE_REC_ROM_SHADOW:
1611 case PGM_STATE_REC_ROM_SHADOW_ZERO:
1612 if (PGMROMPROT_IS_ROM(enmProt))
1613 pRealPage = &pRomPage->Shadow;
1614 else
1615 pRealPage = pPage;
1616 break;
1617 }
1618
1619 /* Map it if necessary. */
1620 void *pvDstPage = NULL;
1621 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
1622 {
1623 case PGM_STATE_REC_ROM_SHADOW_ZERO:
1624 if (PGM_PAGE_IS_ZERO(pRealPage))
1625 break;
1626 /** @todo implement zero page replacing. */
1627 /* fall thru */
1628 case PGM_STATE_REC_ROM_VIRGIN:
1629 case PGM_STATE_REC_ROM_SHADOW:
1630 {
1631 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pRealPage) != PGM_PAGE_STATE_ALLOCATED))
1632 {
1633 rc = pgmPhysPageMakeWritable(pVM, pRealPage, GCPhys);
1634 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
1635 }
1636 PPGMPAGEMAP pMapIgnored;
1637 rc = pgmPhysPageMap(pVM, pRealPage, GCPhys, &pMapIgnored, &pvDstPage);
1638 AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
1639 break;
1640 }
1641 }
1642
1643 /* Load the bits. */
1644 switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
1645 {
1646 case PGM_STATE_REC_ROM_SHADOW_ZERO:
1647 if (pvDstPage)
1648 ASMMemZeroPage(pvDstPage);
1649 break;
1650
1651 case PGM_STATE_REC_ROM_VIRGIN:
1652 case PGM_STATE_REC_ROM_SHADOW:
1653 rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
1654 if (RT_FAILURE(rc))
1655 return rc;
1656 break;
1657 }
1658 break;
1659 }
1660
1661 default:
1662 AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
1663 }
1664 }
1665}
1666
1667
1668/**
1669 * Worker for pgmR3Load.
1670 *
1671 * @returns VBox status code.
1672 *
1673 * @param pVM The VM handle.
1674 * @param pSSM The SSM handle.
1675 * @param uVersion The saved state version.
1676 */
1677static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
1678{
1679 PPGM pPGM = &pVM->pgm.s;
1680 int rc;
1681 uint32_t u32Sep;
1682
1683 /*
1684 * Load basic data (required / unaffected by relocation).
1685 */
1686 if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
1687 {
1688 rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
1689 AssertLogRelRCReturn(rc, rc);
1690
1691 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1692 {
1693 rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
1694 AssertLogRelRCReturn(rc, rc);
1695 }
1696 }
1697 else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
1698 {
1699 AssertRelease(pVM->cCpus == 1);
1700
1701 PGMOLD pgmOld;
1702 rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
1703 AssertLogRelRCReturn(rc, rc);
1704
1705 pPGM->fMappingsFixed = pgmOld.fMappingsFixed;
1706 pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
1707 pPGM->cbMappingFixed = pgmOld.cbMappingFixed;
1708
1709 pVM->aCpus[0].pgm.s.fA20Enabled = pgmOld.fA20Enabled;
1710 pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
1711 pVM->aCpus[0].pgm.s.enmGuestMode = pgmOld.enmGuestMode;
1712 }
1713 else
1714 {
1715 AssertRelease(pVM->cCpus == 1);
1716
1717 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
1718 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
1719 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
1720
1721 uint32_t cbRamSizeIgnored;
1722 rc = SSMR3GetU32(pSSM, &cbRamSizeIgnored);
1723 if (RT_FAILURE(rc))
1724 return rc;
1725 SSMR3GetGCPhys(pSSM, &pVM->aCpus[0].pgm.s.GCPhysA20Mask);
1726
1727 uint32_t u32 = 0;
1728 SSMR3GetUInt(pSSM, &u32);
1729 pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
1730 SSMR3GetUInt(pSSM, &pVM->aCpus[0].pgm.s.fSyncFlags);
1731 RTUINT uGuestMode;
1732 SSMR3GetUInt(pSSM, &uGuestMode);
1733 pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;
1734
1735 /* check separator. */
1736 SSMR3GetU32(pSSM, &u32Sep);
1737 if (RT_FAILURE(rc))
1738 return rc;
1739 if (u32Sep != (uint32_t)~0)
1740 {
1741 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
1742 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1743 }
1744 }
1745
1746 /*
1747 * The guest mappings.
1748 */
1749 uint32_t i = 0;
1750 for (;; i++)
1751 {
1752 /* Check the seqence number / separator. */
1753 rc = SSMR3GetU32(pSSM, &u32Sep);
1754 if (RT_FAILURE(rc))
1755 return rc;
1756 if (u32Sep == ~0U)
1757 break;
1758 if (u32Sep != i)
1759 {
1760 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1761 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1762 }
1763
1764 /* get the mapping details. */
1765 char szDesc[256];
1766 szDesc[0] = '\0';
1767 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
1768 if (RT_FAILURE(rc))
1769 return rc;
1770 RTGCPTR GCPtr;
1771 SSMR3GetGCPtr(pSSM, &GCPtr);
1772 RTGCPTR cPTs;
1773 rc = SSMR3GetGCUIntPtr(pSSM, &cPTs);
1774 if (RT_FAILURE(rc))
1775 return rc;
1776
1777 /* find matching range. */
1778 PPGMMAPPING pMapping;
1779 for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
1780 if ( pMapping->cPTs == cPTs
1781 && !strcmp(pMapping->pszDesc, szDesc))
1782 break;
1783 AssertLogRelMsgReturn(pMapping, ("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%RGv)\n",
1784 cPTs, szDesc, GCPtr),
1785 VERR_SSM_LOAD_CONFIG_MISMATCH);
1786
1787 /* relocate it. */
1788 if (pMapping->GCPtr != GCPtr)
1789 {
1790 AssertMsg((GCPtr >> X86_PD_SHIFT << X86_PD_SHIFT) == GCPtr, ("GCPtr=%RGv\n", GCPtr));
1791 pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr, GCPtr);
1792 }
1793 else
1794 Log(("pgmR3Load: '%s' needed no relocation (%RGv)\n", szDesc, GCPtr));
1795 }
1796
1797 /*
1798 * Load the RAM contents.
1799 */
1800 if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
1801 return pgmR3LoadMemory(pVM, pSSM, SSM_PASS_FINAL);
1802 return pgmR3LoadMemoryOld(pVM, pSSM, uVersion);
1803}
1804
1805
1806/**
1807 * Execute state load operation.
1808 *
1809 * @returns VBox status code.
1810 * @param pVM VM Handle.
1811 * @param pSSM SSM operation handle.
1812 * @param uVersion Data layout version.
1813 * @param uPass The data pass.
1814 */
1815static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1816{
1817 int rc;
1818 PPGM pPGM = &pVM->pgm.s;
1819 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1820
1821 /*
1822 * Validate version.
1823 */
1824 if ( ( uPass != SSM_PASS_FINAL
1825 && uVersion != PGM_SAVED_STATE_VERSION)
1826 || ( uVersion != PGM_SAVED_STATE_VERSION
1827 && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
1828 && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
1829 && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
1830 && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
1831 )
1832 {
1833 AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
1834 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1835 }
1836
1837 if (uPass != SSM_PASS_FINAL)
1838 {
1839 /*
1840 * The non-final passes contains only memory.
1841 */
1842 pgmLock(pVM);
1843 rc = pgmR3LoadMemory(pVM, pSSM, uPass);
1844 pgmUnlock(pVM);
1845 }
1846 else
1847 {
1848 /*
1849 * Do the loading while owning the lock because a bunch of the functions
1850 * we're using requires this.
1851 */
1852 pgmLock(pVM);
1853 rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
1854 pgmUnlock(pVM);
1855 if (RT_SUCCESS(rc))
1856 {
1857 /*
1858 * We require a full resync now.
1859 */
1860 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1861 {
1862 PVMCPU pVCpu = &pVM->aCpus[i];
1863 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1864 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1865
1866 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
1867 }
1868
1869 pgmR3HandlerPhysicalUpdateAll(pVM);
1870
1871 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1872 {
1873 PVMCPU pVCpu = &pVM->aCpus[i];
1874
1875 /*
1876 * Change the paging mode.
1877 */
1878 rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
1879
1880 /* Restore pVM->pgm.s.GCPhysCR3. */
1881 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
1882 RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
1883 if ( pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
1884 || pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1885 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
1886 || pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1887 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
1888 else
1889 GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
1890 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1891 }
1892 }
1893 }
1894
1895 return rc;
1896}
1897
1898
1899/**
1900 * Registers the saved state callbacks with SSM.
1901 *
1902 * @returns VBox status code.
1903 * @param pVM Pointer to VM structure.
1904 * @param cbRam The RAM size.
1905 */
1906int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
1907{
1908 return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
1909 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
1910 NULL, pgmR3SaveExec, pgmR3SaveDone,
1911 pgmR3LoadPrep, pgmR3Load, NULL);
1912}
1913
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette