VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp@960

Last change on this file since 960 was 877, checked in by vboxsync, 18 years ago

Changed error message when out of memory. (no longer allowed to save the state)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 19.9 KB
/* $Id: PGMPhys.cpp 877 2007-02-13 15:31:16Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/pdm.h>
#include <VBox/stam.h>
#include <VBox/rem.h>
#include <VBox/csam.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <VBox/dbg.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <VBox/log.h>
#include <iprt/thread.h>
#include <iprt/string.h>


/**
 * Interface for the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
 * registration calls.
 *
 * It registers the physical memory range with PGM. MM is responsible
 * for the top-level things - allocation and locking - while PGM takes
 * care of all the details and implements the physical address space
 * virtualization.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pvRam       HC virtual address of the RAM range. (page aligned)
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 * @param   fFlags      Flags, MM_RAM_*.
 * @param   paPages     Pointer to an array of physical page descriptors.
 * @param   pszDesc     Description string.
 */
PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
{
    /*
     * Validate input.
     * (Not so important because the callers are only MMR3PhysRegister()
     * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
     */
    Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));

    Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
    /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
    Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
    /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
    Assert(!(fFlags & ~0xfff));
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    if (GCPhysLast < GCPhys)
    {
        AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Find range location and check for conflicts.
     */
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur  = pVM->pgm.s.pRamRangesHC;
    while (pCur)
    {
        if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
        {
            AssertMsgFailed(("Conflict! This cannot happen!\n"));
            return VERR_PGM_RAM_CONFLICT;
        }
        if (GCPhysLast < pCur->GCPhys)
            break;

        /* next */
        pPrev = pCur;
        pCur = pCur->pNextHC;
    }

    /*
     * Allocate RAM range.
     * Small ranges are allocated from the heap, big ones have separate mappings.
     */
    size_t          cbRam = RT_OFFSETOF(PGMRAMRANGE, aHCPhys[cb >> PAGE_SHIFT]);
    PPGMRAMRANGE    pNew;
    RTGCPTR         GCPtrNew;
    int             rc;
    if (cbRam > PAGE_SIZE / 2)
    {   /* large */
        cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
        rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
        if (VBOX_SUCCESS(rc))
        {
            rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
            if (VBOX_SUCCESS(rc))
            {
                Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
                rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            }
            else
            {
                AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
                SUPPageFree(pNew);
            }
        }
        else
            AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
    }
    else
    {   /* small */
        rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
        if (VBOX_SUCCESS(rc))
            GCPtrNew = MMHyperHC2GC(pVM, pNew);
        else
            AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc));
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Initialize the range.
         */
        pNew->pvHC          = pvRam;
        pNew->GCPhys        = GCPhys;
        pNew->GCPhysLast    = GCPhysLast;
        pNew->cb            = cb;
        pNew->fFlags        = fFlags;
        pNew->pavHCChunkHC  = NULL;
        pNew->pavHCChunkGC  = 0;

        unsigned iPage = cb >> PAGE_SHIFT;
        if (paPages)
        {
            while (iPage-- > 0)
                pNew->aHCPhys[iPage] = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags;
        }
        else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
        {
            /* Allocate memory for the chunk-to-HC-pointer lookup array. */
            rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
            AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);

            pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
            Assert(pNew->pavHCChunkGC);

            /* Physical memory will be allocated on demand. */
            while (iPage-- > 0)
                pNew->aHCPhys[iPage] = fFlags;
        }
        else
        {
            Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
            RTHCPHYS HCPhysDummyPage = (MMR3PageDummyHCPhys(pVM) & X86_PTE_PAE_PG_MASK) | fFlags;
            while (iPage-- > 0)
                pNew->aHCPhys[iPage] = HCPhysDummyPage;
        }

        /*
         * Insert the new RAM range.
         */
        pgmLock(pVM);
        pNew->pNextHC = pCur;
        pNew->pNextGC = pCur ? MMHyperHC2GC(pVM, pCur) : 0;
        if (pPrev)
        {
            pPrev->pNextHC = pNew;
            pPrev->pNextGC = GCPtrNew;
        }
        else
        {
            pVM->pgm.s.pRamRangesHC = pNew;
            pVM->pgm.s.pRamRangesGC = GCPtrNew;
        }
        pgmUnlock(pVM);
    }
    return rc;
}
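

/*
 * Illustrative sketch only (not part of the original file): how a top-level
 * caller such as MMR3PhysRegister() might hand a page-aligned, locked host
 * buffer to PGMR3PhysRegister(). The buffer, the page descriptor array and
 * the 4 MB size are hypothetical.
 */
#if 0 /* example only */
static int examplePhysRegisterBaseRam(PVM pVM, void *pvRam, const SUPPAGE *paPages)
{
    /* Register 4 MB of base RAM at guest physical address 0 with no special flags. */
    return PGMR3PhysRegister(pVM, pvRam, 0 /*GCPhys*/, 4 * 1024 * 1024 /*cb*/,
                             0 /*fFlags*/, paPages, "Base RAM");
}
#endif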


/**
 * Registers a chunk of the physical memory range with PGM. MM is responsible
 * for the top-level things - allocation and locking - while PGM takes
 * care of all the details and implements the physical address space
 * virtualization.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pvRam       HC virtual address of the RAM chunk. (page aligned)
 * @param   GCPhys      GC physical address of the RAM chunk. (page aligned)
 * @param   cb          Size of the RAM chunk. (page aligned)
 * @param   fFlags      Flags, MM_RAM_*.
 * @param   paPages     Pointer to an array of physical page descriptors.
 * @param   pszDesc     Description string.
 */
PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
{
#ifdef PGM_DYNAMIC_RAM_ALLOC
    NOREF(pszDesc);

    /*
     * Validate input.
     * (Not so important because the callers are only MMR3PhysRegister()
     * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
     */
    Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));

    Assert(paPages);
    Assert(pvRam);
    Assert(!(fFlags & ~0xfff));
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(VM_IS_EMT(pVM));
    Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);

    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    if (GCPhysLast < GCPhys)
    {
        AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Find the existing range location.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (    off < pRam->cb
            &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
            break;

        pRam = CTXSUFF(pRam->pNext);
    }
    AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);

    unsigned off   = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
    unsigned iPage = cb >> PAGE_SHIFT;
    if (paPages)
    {
        while (iPage-- > 0)
            pRam->aHCPhys[off + iPage] = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags;
    }
    off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
    pRam->pavHCChunkHC[off] = pvRam;

    /* Notify the recompiler. */
    REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);

    return VINF_SUCCESS;
#else /* !PGM_DYNAMIC_RAM_ALLOC */
    AssertReleaseMsgFailed(("Shouldn't ever get here when PGM_DYNAMIC_RAM_ALLOC isn't defined!\n"));
    return VERR_INTERNAL_ERROR;
#endif /* !PGM_DYNAMIC_RAM_ALLOC */
}
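

/*
 * Illustrative sketch only: filling in one chunk of a range that was created
 * with MM_RAM_FLAGS_DYNAMIC_ALLOC. The 16 MB address is hypothetical and
 * assumed to be chunk aligned; cb must be exactly PGM_DYNAMIC_CHUNK_SIZE.
 */
#if 0 /* example only */
static int examplePhysRegisterChunk(PVM pVM, void *pvChunk, const SUPPAGE *paPages)
{
    return PGMR3PhysRegisterChunk(pVM, pvChunk, 16 * 1024 * 1024 /*GCPhys*/,
                                  PGM_DYNAMIC_CHUNK_SIZE, MM_RAM_FLAGS_DYNAMIC_ALLOC,
                                  paPages, "Main Memory chunk");
}
#endif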


/**
 * Allocates missing physical pages for an existing guest RAM range.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 */
PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Walk the range list.
     */
    pgmLock(pVM);

    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (    off < pRam->cb
            &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
        {
            bool     fRangeExists = false;
            unsigned iChunk = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;

            /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
            if (pRam->pavHCChunkHC[iChunk])
                fRangeExists = true;

            pgmUnlock(pVM);
            if (fRangeExists)
                return VINF_SUCCESS;
            return pgmr3PhysGrowRange(pVM, GCPhys);
        }

        pRam = CTXSUFF(pRam->pNext);
    }
    pgmUnlock(pVM);
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
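

/*
 * Illustrative sketch only: a hypothetical caller that makes sure the chunk
 * containing a guest physical address is backed before touching it. Note that
 * PGM3PhysGrowRange() itself returns VINF_SUCCESS if the chunk already exists.
 */
#if 0 /* example only */
static int exampleEnsureChunkBacked(PVM pVM, RTGCPHYS GCPhysHit)
{
    int rc = PGM3PhysGrowRange(pVM, GCPhysHit);
    AssertRC(rc);
    return rc;
}
#endif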


/**
 * Allocates missing physical pages for an existing guest RAM range.
 *
 * Worker for PGM3PhysGrowRange(). Must run on the EMT thread; when invoked
 * from any other thread it forwards the call to EMT via VMR3ReqCall().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 */
int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
{
    void *pvRam;
    int   rc;

    /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
    if (!VM_IS_EMT(pVM))
    {
        PVMREQ pReq;

        AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));

        rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, GCPhys);
        if (VBOX_SUCCESS(rc))
        {
            rc = pReq->iStatus;
            VMR3ReqFree(pReq);
        }
        return rc;
    }

    /* Round down to the chunk boundary. */
    GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;

    STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
    STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));

    Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));

    unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
    rc = SUPPageAlloc(cPages, &pvRam);
    if (VBOX_SUCCESS(rc))
    {
        VMSTATE enmVMState = VMR3GetState(pVM);

        rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
        if (    VBOX_SUCCESS(rc)
            ||  enmVMState != VMSTATE_RUNNING)
        {
            if (VBOX_FAILURE(rc))
            {
                AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
                LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
            }
            return rc;
        }

        SUPPageFree(pvRam);

        LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
        VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");

        rc = VMR3SuspendNoSave(pVM);
        AssertRC(rc);

        /* Wait for the resume event; this call only returns in that case. If the VM is stopped, the EMT thread will be destroyed. */
        rc = VMR3WaitForResume(pVM);

        /* Retry. */
        LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
        return pgmr3PhysGrowRange(pVM, GCPhys);
    }
    return rc;
}


/**
 * Interface for the MMIO handler relocation calls.
 *
 * It relocates an existing physical memory range with PGM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   GCPhysOld   Previous GC physical address of the RAM range. (page aligned)
 * @param   GCPhysNew   New GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 */
PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)
{
    /*
     * Validate input.
     * (Not so important because the caller is only MMR3PhysRelocate(),
     * but anyway...)
     */
    Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));

    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
    Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
    RTGCPHYS GCPhysLast;
    GCPhysLast = GCPhysOld + (cb - 1);
    if (GCPhysLast < GCPhysOld)
    {
        AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));
        return VERR_INVALID_PARAMETER;
    }
    GCPhysLast = GCPhysNew + (cb - 1);
    if (GCPhysLast < GCPhysNew)
    {
        AssertMsgFailed(("The new range wraps! GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Find and remove the old range location.
     */
    pgmLock(pVM);
    PPGMRAMRANGE pPrev = NULL;
    PPGMRAMRANGE pCur  = pVM->pgm.s.pRamRangesHC;
    while (pCur)
    {
        if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)
            break;

        /* next */
        pPrev = pCur;
        pCur = pCur->pNextHC;
    }
    if (!pCur)
    {
        AssertMsgFailed(("No matching range for %VGp cb=%#x\n", GCPhysOld, cb));
        pgmUnlock(pVM);
        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    }
    if (pPrev)
    {
        pPrev->pNextHC = pCur->pNextHC;
        pPrev->pNextGC = pCur->pNextGC;
    }
    else
    {
        pVM->pgm.s.pRamRangesHC = pCur->pNextHC;
        pVM->pgm.s.pRamRangesGC = pCur->pNextGC;
    }

    /*
     * Update the range.
     */
    pCur->GCPhys     = GCPhysNew;
    pCur->GCPhysLast = GCPhysLast;
    PPGMRAMRANGE pNew = pCur;

    /*
     * Find the new range location and check for conflicts.
     */
    pPrev = NULL;
    pCur = pVM->pgm.s.pRamRangesHC;
    while (pCur)
    {
        if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
        {
            AssertMsgFailed(("Conflict! This cannot happen!\n"));
            pgmUnlock(pVM);
            return VERR_PGM_RAM_CONFLICT;
        }
        if (GCPhysLast < pCur->GCPhys)
            break;

        /* next */
        pPrev = pCur;
        pCur = pCur->pNextHC;
    }

    /*
     * Reinsert the RAM range.
     */
    pNew->pNextHC = pCur;
    pNew->pNextGC = pCur ? MMHyperHC2GC(pVM, pCur) : 0;
    if (pPrev)
    {
        pPrev->pNextHC = pNew;
        pPrev->pNextGC = MMHyperHC2GC(pVM, pNew);
    }
    else
    {
        pVM->pgm.s.pRamRangesHC = pNew;
        pVM->pgm.s.pRamRangesGC = MMHyperHC2GC(pVM, pNew);
    }

    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
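

/*
 * Illustrative sketch only: relocating a 4 KB MMIO range, e.g. after the
 * guest reprograms a device's base address. Both addresses are hypothetical.
 */
#if 0 /* example only */
static int exampleRelocateMmio(PVM pVM)
{
    return PGMR3PhysRelocate(pVM, 0xe0000000 /*GCPhysOld*/, 0xe8000000 /*GCPhysNew*/, PAGE_SIZE);
}
#endif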


/**
 * Interface for MMR3RomRegister() and MMR3PhysReserve() calls to update the
 * flags of existing RAM ranges.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address of the RAM range. (page aligned)
 * @param   cb          Size of the RAM range. (page aligned)
 * @param   fFlags      The flags to OR in, MM_RAM_* \#defines.
 * @param   fMask       The AND mask selecting which existing flag bits to keep.
 */
PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
{
    Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));

    /*
     * Validate input.
     * (Not so important because the callers are only MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
     */
    Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
    AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);

    /*
     * Lookup the range.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhys > pRam->GCPhysLast
        ||  GCPhysLast < pRam->GCPhys)
    {
        AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Update the requested flags.
     */
    RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
                       | fMask;
    unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
    unsigned iPage    = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
    for ( ; iPage < iPageEnd; iPage++)
        pRam->aHCPhys[iPage] = (pRam->aHCPhys[iPage] & fFullMask) | fFlags;

    return VINF_SUCCESS;
}
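

/*
 * Illustrative sketch only: the flags update works per page as
 * "new = (old & fFullMask) | fFlags", so fMask names the special flag bits to
 * preserve. This hypothetical caller marks a range as ROM and clears the
 * other special flags by passing an empty mask.
 */
#if 0 /* example only */
static int exampleMarkRangeAsRom(PVM pVM, RTGCPHYS GCPhys, size_t cb)
{
    return PGMR3PhysSetFlags(pVM, GCPhys, cb, MM_RAM_FLAGS_ROM /*fFlags*/, 0 /*fMask*/);
}
#endif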


/**
 * Sets the Address Gate 20 state.
 *
 * @param   pVM         VM handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
{
    LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
    if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
    {
        pVM->pgm.s.fA20Enabled = fEnable;
        pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
        REMR3A20Set(pVM, fEnable);
    }
}
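

/*
 * Worked example (added for clarity): with fEnable == false the expression
 * !fEnable << 20 evaluates to 1 << 20, so GCPhysA20Mask becomes
 * ~(RTGCPHYS)0x00100000 and bit 20 is cleared from every guest physical
 * address, making the 1 MB-2 MB window alias the first megabyte just like a
 * closed A20 gate on real hardware. With fEnable == true the mask is all
 * ones and addresses pass through unchanged.
 */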


/*
 * PGMR3PhysReadByte/Word/Dword
 * PGMR3PhysWriteByte/Word/Dword
 */

#define PGMPHYSFN_READNAME  PGMR3PhysReadByte
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
#define PGMPHYS_DATASIZE    1
#define PGMPHYS_DATATYPE    uint8_t
#include "PGMPhys.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadWord
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
#define PGMPHYS_DATASIZE    2
#define PGMPHYS_DATATYPE    uint16_t
#include "PGMPhys.h"

#define PGMPHYSFN_READNAME  PGMR3PhysReadDword
#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
#define PGMPHYS_DATASIZE    4
#define PGMPHYS_DATATYPE    uint32_t
#include "PGMPhys.h"
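

/*
 * Illustrative sketch only: each PGMPhys.h inclusion above instantiates a
 * read/write pair for the named data size. Assuming the usual shape of these
 * helpers (read returns the value, write takes it), a hypothetical user
 * could access guest physical memory like this.
 */
#if 0 /* example only */
static void exampleGuestPhysAccess(PVM pVM)
{
    uint32_t u32 = PGMR3PhysReadDword(pVM, 0x1000 /*GCPhys*/);
    PGMR3PhysWriteWord(pVM, 0x2000 /*GCPhys*/, (uint16_t)(u32 & 0xffff));
}
#endif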