VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGM-armv8.cpp@ 107931

Last change on this file since 107931 was 107876, checked in by vboxsync, 3 months ago

VMM/PGM: Merge and deduplicate code targeting x86 & amd64 in PGM.cpp. VBP-1470

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.6 KB
Line 
1/* $Id: PGM-armv8.cpp 107876 2025-01-21 15:46:55Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, ARMv8 variant. (Mixing stuff here, not good?)
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
/** @page pg_pgm_armv8 PGM - The Page Manager and Monitor (ARMv8 variant)
 *
 * For now this is just a stub for bringing up the ARMv8 hypervisor. We'll see how
 * much we really need here later on and whether it makes sense to merge this with the original PGM.cpp
 * (avoiding \#ifdef hell with this, as I'm not confident enough to fiddle around with PGM too much at this point).
 */
35
36
37/*********************************************************************************************************************************
38* Header Files *
39*********************************************************************************************************************************/
40#define LOG_GROUP LOG_GROUP_PGM
41#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
42#include <VBox/vmm/dbgf.h>
43#include <VBox/vmm/pgm.h>
44#include <VBox/vmm/cpum.h>
45#include <VBox/vmm/cpum-armv8.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/sup.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/em.h>
50#include <VBox/vmm/stam.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/hm.h>
54#include "PGMInternal.h"
55#include <VBox/vmm/vmcc.h>
56#include <VBox/vmm/uvm.h>
57#include "PGMInline.h"
58
59#include <VBox/dbg.h>
60#include <VBox/param.h>
61#include <VBox/err.h>
62
63#include <iprt/asm.h>
64#include <iprt/assert.h>
65#include <iprt/env.h>
66#include <iprt/file.h>
67#include <iprt/mem.h>
68#include <iprt/rand.h>
69#include <iprt/string.h>
70#include <iprt/thread.h>
71
72
#if 0 /* now in PGM.cpp, where it came from */
74
75
76/*********************************************************************************************************************************
77* Internal Functions *
78*********************************************************************************************************************************/
79#ifdef VBOX_STRICT
80static FNVMATSTATE pgmR3ResetNoMorePhysWritesFlag;
81#endif
82
83
84/*********************************************************************************************************************************
85* Global Variables *
86*********************************************************************************************************************************/
87#ifndef VBOX_WITH_PGM_NEM_MODE
88# error "This requires VBOX_WITH_PGM_NEM_MODE to be set at all times!"
89#endif
90
/**
 * Interface that NEM uses to switch PGM into simplified memory management mode.
 *
 * This call occurs before PGMR3Init.
 *
 * @param pVM The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3EnableNemMode(PVM pVM)
{
    /* Must be called before PGMR3Init creates the critsect. */
    AssertFatal(!PDMCritSectIsInitialized(&pVM->pgm.s.CritSectX));
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pVM->pgm.s.fNemMode = true; /* With VBOX_WITH_ONLY_PGM_NEM_MODE the mode is implied, no flag exists. */
#endif
}
105
106
/**
 * Checks whether the simplified memory management mode for NEM is enabled.
 *
 * @returns true if enabled, false if not.
 * @param pVM The cross context VM structure.
 */
VMMR3_INT_DECL(bool) PGMR3IsNemModeEnabled(PVM pVM)
{
    RT_NOREF(pVM); /* pVM may go unreferenced when PGM_IS_IN_NEM_MODE expands to a constant. */
    return PGM_IS_IN_NEM_MODE(pVM);
}
118
119
/**
 * Initiates the paging of VM.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Init(PVM pVM)
{
    LogFlow(("PGMR3Init:\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
    AssertCompile(sizeof(pVM->apCpusR3[0]->pgm.s) <= sizeof(pVM->apCpusR3[0]->pgm.padding));
    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));

    bool const fDriverless = SUPR3IsDriverless();

    /*
     * Init the structure.
     */
    /*pVM->pgm.s.fRestoreRomPagesAtReset = false;*/

    /* We always use the simplified memory mode on arm. */
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pVM->pgm.s.fNemMode = true;
#endif

    /* Mark all handy pages as unallocated. */
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    {
        pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aHandyPages[i].fZeroed      = false;
        pVM->pgm.s.aHandyPages[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
    }

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLargeHandyPage); i++)
    {
        pVM->pgm.s.aLargeHandyPage[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aLargeHandyPage[i].fZeroed      = false;
        pVM->pgm.s.aLargeHandyPage[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aLargeHandyPage[i].idSharedPage = NIL_GMM_PAGEID;
    }

    /* Invalidate the physical handler type table; in driverless mode the handles
       are generated here (randomized upper bits), otherwise ring-0 provides them. */
    AssertReleaseReturn(pVM->pgm.s.cPhysHandlerTypes == 0, VERR_WRONG_ORDER);
    for (size_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes); i++)
    {
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (fDriverless)
#endif
            pVM->pgm.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
        pVM->pgm.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
        pVM->pgm.s.aPhysHandlerTypes[i].pfnHandler = pgmR3HandlerPhysicalHandlerInvalid;
    }

#if 0
    /* Init the per-CPU part. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        PPGMCPU pPGM = &pVCpu->pgm.s;
    }
#endif

    /*
     * Read the configuration.
     */
    PCFGMNODE const pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");

    /** @todo RamPreAlloc doesn't work for NEM-mode. */
    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
#ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
                                true
#else
                                false
#endif
                               );
    AssertLogRelRCReturn(rc, rc);

#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
    for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
#endif

    /*
     * Get the configured RAM size - to estimate saved state size.
     */
    uint64_t cbRam;
    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;
    else if (RT_SUCCESS(rc))
    {
        /* Round sub-page sizes down to zero, everything else up to a page boundary. */
        if (cbRam < GUEST_PAGE_SIZE)
            cbRam = 0;
        cbRam = RT_ALIGN_64(cbRam, GUEST_PAGE_SIZE);
    }
    else
    {
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
        return rc;
    }

    /** @cfgm{/PGM/ZeroRamPagesOnReset, boolean, true}
     * Whether to clear RAM pages on (hard) reset. */
    rc = CFGMR3QueryBoolDef(pCfgPGM, "ZeroRamPagesOnReset", &pVM->pgm.s.fZeroRamPagesOnReset, true);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Register callbacks, string formatters and the saved state data unit.
     */
#ifdef VBOX_STRICT
    VMR3AtStateRegister(pVM->pUVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
#endif
    PGMRegisterStringFormatTypes();

    rc = pgmR3InitSavedState(pVM, cbRam);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the PGM critical section and flush the phys TLBs
     */
    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
    AssertRCReturn(rc, rc);

#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pgmR3PhysChunkInvalidateTLB(pVM, false /*fInRendezvous*/); /* includes pgmPhysInvalidatePageMapTLB call */
#endif

    /*
     * For the time being we sport a full set of handy pages in addition to the base
     * memory to simplify things.
     */
    rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
    AssertRCReturn(rc, rc);

    /*
     * Setup the zero page (HCPHysZeroPg is set by ring-0).
     */
    RT_ZERO(pVM->pgm.s.abZeroPg); /* paranoia */
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (fDriverless)
        pVM->pgm.s.HCPhysZeroPg = _4G - GUEST_PAGE_SIZE * 2 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != 0);
#endif

    /*
     * Setup the invalid MMIO page (HCPhysMmioPg is set by ring-0).
     * (The invalid bits in HCPhysInvMmioPg are set later on init complete.)
     */
    ASMMemFill32(pVM->pgm.s.abMmioPg, sizeof(pVM->pgm.s.abMmioPg), 0xfeedface);
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (fDriverless)
        pVM->pgm.s.HCPhysMmioPg = _4G - GUEST_PAGE_SIZE * 3 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != 0);
    pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
#endif

    /*
     * Initialize physical access handlers.
     */
    /** @cfgm{/PGM/MaxPhysicalAccessHandlers, uint32_t, 32, 65536, 6144}
     * Number of physical access handlers allowed (subject to rounding). This is
     * managed as one time allocation during initializations. The default is
     * lower for a driverless setup. */
    /** @todo can lower it for nested paging too, at least when there is no
     *        nested guest involved. */
    uint32_t cAccessHandlers = 0;
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxPhysicalAccessHandlers", &cAccessHandlers, !fDriverless ? 6144 : 640);
    AssertLogRelRCReturn(rc, rc);
    /* Clamp into the documented [32, 64K] range rather than failing. */
    AssertLogRelMsgStmt(cAccessHandlers >= 32, ("cAccessHandlers=%#x, min 32\n", cAccessHandlers), cAccessHandlers = 32);
    AssertLogRelMsgStmt(cAccessHandlers <= _64K, ("cAccessHandlers=%#x, max 65536\n", cAccessHandlers), cAccessHandlers = _64K);
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!fDriverless)
    {
        /* Ring-0 allocates the handler tree + slab allocator. */
        rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_HANDLER_INIT, cAccessHandlers, NULL);
        AssertRCReturn(rc, rc);
        AssertPtr(pVM->pgm.s.pPhysHandlerTree);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_paNodes);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc);
    }
    else
#endif
    {
        /* Driverless: allocate and lay out tree + alloc bitmap + node slab ourselves. */
        uint32_t       cbTreeAndBitmap = 0;
        uint32_t const cbTotalAligned  = pgmHandlerPhysicalCalcTableSizes(&cAccessHandlers, &cbTreeAndBitmap);
        uint8_t       *pb = NULL;
        rc = SUPR3PageAlloc(cbTotalAligned >> HOST_PAGE_SHIFT, 0, (void **)&pb);
        AssertLogRelRCReturn(rc, rc);

        pVM->pgm.s.PhysHandlerAllocator.initSlabAllocator(cAccessHandlers, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
                                                          (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
        pVM->pgm.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
        pVM->pgm.s.pPhysHandlerTree->initWithAllocator(&pVM->pgm.s.PhysHandlerAllocator);
    }

    /*
     * Register the physical access handler protecting ROMs.
     */
    if (RT_SUCCESS(rc))
        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/, pgmPhysRomWriteHandler,
                                              "ROM write protection", &pVM->pgm.s.hRomPhysHandlerType);

    /*
     * Register the physical access handler doing dirty MMIO2 tracing.
     */
    if (RT_SUCCESS(rc))
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
                                              pgmPhysMmio2WriteHandler, "MMIO2 dirty page tracing",
                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);

    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Almost no cleanup necessary, MM frees all memory. */
    PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);

    return rc;
}
346
347
/**
 * Ring-3 init finalizing (not required here).
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 */
VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
{
    RT_NOREF(pVM); /* only referenced in the non-NEM-only build below */
    int rc = VINF_SUCCESS;
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (pVM->pgm.s.fRamPreAlloc)
        rc = pgmR3PhysRamPreAllocate(pVM);
#endif

    //pgmLogState(pVM);
    LogRel(("PGM: PGMR3InitFinalize done: %Rrc\n", rc));
    return rc;
}
367
368
369/**
370 * Init phase completed callback.
371 *
372 * @returns VBox status code.
373 * @param pVM The cross context VM structure.
374 * @param enmWhat What has been completed.
375 * @thread EMT(0)
376 */
377VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
378{
379 switch (enmWhat)
380 {
381 case VMINITCOMPLETED_HM:
382 AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough, VERR_PGM_PCI_PASSTHRU_MISCONFIG);
383 break;
384
385 default:
386 /* shut up gcc */
387 break;
388 }
389
390 return VINF_SUCCESS;
391}
392
393
394/**
395 * Applies relocations to data and code managed by this component.
396 *
397 * This function will be called at init and whenever the VMM need to relocate it
398 * self inside the GC.
399 *
400 * @param pVM The cross context VM structure.
401 * @param offDelta Relocation delta relative to old location.
402 */
403VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
404{
405 LogFlow(("PGMR3Relocate: offDelta=%RGv\n", offDelta));
406 RT_NOREF(pVM, offDelta);
407}
408
409
410/**
411 * Resets a virtual CPU when unplugged.
412 *
413 * @param pVM The cross context VM structure.
414 * @param pVCpu The cross context virtual CPU structure.
415 */
416VMMR3DECL(void) PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
417{
418 RT_NOREF(pVM, pVCpu);
419}
420
421
/**
 * The VM is being reset.
 *
 * For the PGM component this means that any PD write monitors
 * needs to be removed.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3Reset(PVM pVM)
{
    LogFlow(("PGMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    PGM_LOCK_VOID(pVM);

#ifdef DEBUG
    /* Dump mapping and handler state to the log for debug builds. */
    DBGFR3_INFO_LOG_SAFE(pVM, "mappings", NULL);
    DBGFR3_INFO_LOG_SAFE(pVM, "handlers", "all nostat");
#endif

    //pgmLogState(pVM);
    PGM_UNLOCK(pVM);
}
445
446
447/**
448 * Memory setup after VM construction or reset.
449 *
450 * @param pVM The cross context VM structure.
451 * @param fAtReset Indicates the context, after reset if @c true or after
452 * construction if @c false.
453 */
454VMMR3_INT_DECL(void) PGMR3MemSetup(PVM pVM, bool fAtReset)
455{
456 if (fAtReset)
457 {
458 PGM_LOCK_VOID(pVM);
459
460 int rc = pgmR3PhysRamZeroAll(pVM);
461 AssertReleaseRC(rc);
462
463 rc = pgmR3PhysRomReset(pVM);
464 AssertReleaseRC(rc);
465
466 PGM_UNLOCK(pVM);
467 }
468}
469
470
#ifdef VBOX_STRICT
/**
 * VM state change callback for clearing fNoMorePhysWrites after
 * a snapshot has been created.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PUVM pUVM, PCVMMR3VTABLE pVMM, VMSTATE enmState,
                                                         VMSTATE enmOldState, void *pvUser)
{
    switch (enmState)
    {
        case VMSTATE_RUNNING:
        case VMSTATE_RESUMING:
            /* Physical writes are allowed again once execution resumes. */
            pUVM->pVM->pgm.s.fNoMorePhysWrites = false;
            break;
        default:
            break;
    }
    RT_NOREF(pVMM, enmOldState, pvUser);
}
#endif
485
/**
 * Private API to reset fNoMorePhysWrites.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3ResetNoMorePhysWritesFlag(PVM pVM)
{
    pVM->pgm.s.fNoMorePhysWrites = false;
}
493
/**
 * Terminates the PGM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Term(PVM pVM)
{
    /* Must free shared pages here. */
    PGM_LOCK_VOID(pVM);
    pgmR3PhysRamTerm(pVM);
    pgmR3PhysRomTerm(pVM);
    PGM_UNLOCK(pVM);

    PGMDeregisterStringFormatTypes();
    /* The critsect delete status is the function result. */
    return PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);
}
511
512
513/**
514 * Perform an integrity check on the PGM component.
515 *
516 * @returns VINF_SUCCESS if everything is fine.
517 * @returns VBox error status after asserting on integrity breach.
518 * @param pVM The cross context VM structure.
519 */
520VMMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
521{
522 RT_NOREF(pVM);
523 return VINF_SUCCESS;
524}
525
526#endif
527
/**
 * Checks whether the page pool has any dirty pages.
 *
 * @returns true if there are dirty pages, false if not (always false in
 *          NEM-only builds, which have no page pool).
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
{
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
#else
    RT_NOREF(pVM);
    return false;
#endif
}
537
538
/**
 * Checks whether the calling thread owns the PGM lock.
 *
 * @returns true if owner, false if not.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
{
    return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
}
543
544
/**
 * Enables or disables large page usage.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   fUseLargePages  Whether to enable or disable large page usage.
 */
VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pVM->pgm.s.fUseLargePages = fUseLargePages;
    return VINF_SUCCESS;
}
552
553
/**
 * Acquires the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   fVoid   Set if the caller cannot handle failure; an enter failure is
 *                  then escalated via PDM_CRITSECT_RELEASE_ASSERT_RC instead of
 *                  a plain AssertRC.
 */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
#else
int pgmLock(PVMCC pVM, bool fVoid)
#endif
{
#if defined(VBOX_STRICT)
    /* Strict builds record the caller position for lock-order debugging. */
    int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
#else
    int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
#endif
    if (RT_SUCCESS(rc))
        return rc;
    if (fVoid)
        PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
    else
        AssertRC(rc);
    return rc;
}
573
574
575void pgmUnlock(PVMCC pVM)
576{
577 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
578 pVM->pgm.s.cDeprecatedPageLocks = 0;
579 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
580 if (rc == VINF_SEM_NESTED)
581 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
582}
583
584
585#if !defined(IN_R0) || defined(LOG_ENABLED)
586
/** Format handler for PGMPAGE.
 *
 * Renders a page as a compact "state:type:hcphys:pageid:refs" style string;
 * the precision modifier selects which parts are emitted.
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                     const char *pszType, void const *pvValue,
                                                     int cchWidth, int cchPrecision, unsigned fFlags,
                                                     void *pvUser)
{
    size_t cch;
    PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
    if (RT_VALID_PTR(pPage))
    {
        char szTmp[64+80];

        cch = 0;

        /* The single char state stuff. */
        static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

/* A part is included when no precision is given, or when the precision selects
   exactly this level or this level plus ten or more. */
# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
        if (IS_PART_INCLUDED(5))
        {
            /* Lower-case = in HM, upper-case = not in HM. */
            static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
            szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
                                             | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
        }

        /* The type. */
        if (IS_PART_INCLUDED(4))
        {
            szTmp[cch++] = ':';
            static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
        }

        /* The numbers. */
        if (IS_PART_INCLUDED(3))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
        }

        if (IS_PART_INCLUDED(2))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
        }

        if (IS_PART_INCLUDED(6))
        {
            szTmp[cch++] = ':';
            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
        }
# undef IS_PART_INCLUDED

        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
    return cch;
}
653
654
655/** Format handler for PGMRAMRANGE.
656 * @copydoc FNRTSTRFORMATTYPE */
657static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
658 const char *pszType, void const *pvValue,
659 int cchWidth, int cchPrecision, unsigned fFlags,
660 void *pvUser)
661{
662 size_t cch;
663 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
664 if (RT_VALID_PTR(pRam))
665 {
666 char szTmp[80];
667 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
668 cch = pfnOutput(pvArgOutput, szTmp, cch);
669 }
670 else
671 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
672 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
673 return cch;
674}
675
/** Format type handlers to be registered/deregistered. */
static const struct
{
    char szType[24];
    PFNRTSTRFORMATTYPE pfnHandler;
} g_aPgmFormatTypes[] =
{
    { "pgmpage", pgmFormatTypeHandlerPage },
    { "pgmramrange", pgmFormatTypeHandlerRamRange }
};
686
687#endif /* !IN_R0 || LOG_ENABLED */
688
689
/**
 * Registers the PGM string format types (%pgmpage, %pgmramrange) with IPRT.
 *
 * @returns VBox status code; on failure all entries registered so far are
 *          deregistered again.
 */
VMMDECL(int) PGMRegisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    int rc = VINF_SUCCESS;
    unsigned i;
    for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
    {
        rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
# ifdef IN_RING0
        if (rc == VERR_ALREADY_EXISTS)
        {
            /* in case of cleanup failure in ring-0 */
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
            rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
        }
# endif
    }
    /* Roll back on failure (i points one past the failing entry here). */
    if (RT_FAILURE(rc))
        while (i-- > 0)
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);

    return rc;
#else
    return VINF_SUCCESS;
#endif
}
716
717
/**
 * Deregisters the string format types registered by
 * PGMRegisterStringFormatTypes().
 */
VMMDECL(void) PGMDeregisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
        RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
#endif
}
725
726
/**
 * Modifies page flags for a range of pages in the guest's page tables.
 *
 * Not implemented on ARMv8 yet: validates input, logs, then hits a release
 * assertion.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   Guest virtual address of the first page.
 * @param   cb      Size (in bytes) of the range.
 * @param   fFlags  Flags to set (semantics as the x86 variant; unused here).
 * @param   fMask   Mask of flags to clear first (unused here).
 */
VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Validate input.
     */
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask);

    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}
743
744
745VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
746{
747 VMCPU_ASSERT_EMT(pVCpu);
748
749 bool fMmuEnabled = CPUMGetGuestMmuEnabled(pVCpu);
750 if (!fMmuEnabled)
751 return PGMMODE_NONE;
752
753 CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
754 return enmCpuMode == CPUMMODE_ARMV8_AARCH64
755 ? PGMMODE_VMSA_V8_64
756 : PGMMODE_VMSA_V8_32;
757}
758
759
760VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
761{
762 RT_NOREF(pVCpu);
763 return PGMMODE_NONE; /* NEM doesn't need any shadow paging. */
764}
765
766
767DECLINLINE(int) pgmGstWalkReturnNotPresent(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
768{
769 NOREF(pVCpu);
770 pWalk->fNotPresent = true;
771 pWalk->uLevel = uLevel;
772 pWalk->fFailed = PGM_WALKFAIL_NOT_PRESENT
773 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
774 return VERR_PAGE_TABLE_NOT_PRESENT;
775}
776
777DECLINLINE(int) pgmGstWalkReturnBadPhysAddr(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
778{
779 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
780 pWalk->fBadPhysAddr = true;
781 pWalk->uLevel = uLevel;
782 pWalk->fFailed = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
783 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
784 return VERR_PAGE_TABLE_NOT_PRESENT;
785}
786
787
788DECLINLINE(int) pgmGstWalkReturnRsvdError(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
789{
790 NOREF(pVCpu);
791 pWalk->fRsvdError = true;
792 pWalk->uLevel = uLevel;
793 pWalk->fFailed = PGM_WALKFAIL_RESERVED_BITS
794 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
795 return VERR_PAGE_TABLE_NOT_PRESENT;
796}
797
798
/**
 * Walks the guest's VMSAv8-64 page tables and translates @a GCPtr to a guest
 * physical address.
 *
 * @returns VBox status code (walk-failure helpers fill @a pWalk on error).
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   Guest virtual address to translate.
 * @param   pWalk   Where to return the walk result.
 */
VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(pWalk);

    pWalk->fSucceeded = false;

    RTGCPHYS GCPhysPt = CPUMGetEffectiveTtbr(pVCpu, GCPtr);
    if (GCPhysPt == RTGCPHYS_MAX) /* MMU disabled? */
    {
        /* Identity mapping when translation is off. */
        pWalk->GCPtr = GCPtr;
        pWalk->fSucceeded = true;
        pWalk->GCPhys = GCPtr;
        return VINF_SUCCESS;
    }

    /* Do the translation. */
    /** @todo This is just a sketch to get something working for debugging, assumes 4KiB granules and 48-bit output address.
     *        Needs to be moved to PGMAllGst like on x86 and implemented for 16KiB and 64KiB granule sizes. */
    uint64_t u64TcrEl1 = CPUMGetTcrEl1(pVCpu);
    /* Bit 55 of the VA selects the TTBR1 (high) vs TTBR0 (low) region size field. */
    uint8_t u8TxSz = (GCPtr & RT_BIT_64(55))
                   ? ARMV8_TCR_EL1_AARCH64_T1SZ_GET(u64TcrEl1)
                   : ARMV8_TCR_EL1_AARCH64_T0SZ_GET(u64TcrEl1);
    uint8_t uLookupLvl;
    RTGCPHYS fLookupMask;

    /*
     * From: https://github.com/codingbelief/arm-architecture-reference-manual-for-armv8-a/blob/master/en/chapter_d4/d42_2_controlling_address_translation_stages.md
     * For all translation stages
     * The maximum TxSZ value is 39. If TxSZ is programmed to a value larger than 39 then it is IMPLEMENTATION DEFINED whether:
     *     - The implementation behaves as if the field is programmed to 39 for all purposes other than reading back the value of the field.
     *     - Any use of the TxSZ value generates a Level 0 Translation fault for the stage of translation at which TxSZ is used.
     *
     * For a stage 1 translation
     * The minimum TxSZ value is 16. If TxSZ is programmed to a value smaller than 16 then it is IMPLEMENTATION DEFINED whether:
     *     - The implementation behaves as if the field were programmed to 16 for all purposes other than reading back the value of the field.
     *     - Any use of the TxSZ value generates a stage 1 Level 0 Translation fault.
     *
     * We currently choose the former for both.
     */
    if (/*u8TxSz >= 16 &&*/ u8TxSz <= 24)
    {
        uLookupLvl = 0;
        fLookupMask = RT_BIT_64(24 - u8TxSz + 1) - 1;
    }
    else if (u8TxSz >= 25 && u8TxSz <= 33)
    {
        uLookupLvl = 1;
        fLookupMask = RT_BIT_64(33 - u8TxSz + 1) - 1;
    }
    else /*if (u8TxSz >= 34 && u8TxSz <= 39)*/
    {
        uLookupLvl = 2;
        fLookupMask = RT_BIT_64(39 - u8TxSz + 1) - 1;
    }
    /*else
        return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);*/ /** @todo Better status (Invalid TCR config). */

    uint64_t *pu64Pt = NULL;
    uint64_t uPt;
    int rc;
    if (uLookupLvl == 0)
    {
        /* Level 0: descriptor bit 0 = valid, bit 1 = table (block not allowed here). */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, rc);

        uPt = pu64Pt[(GCPtr >> 39) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 0);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 0); /** @todo Only supported if TCR_EL1.DS is set. */

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 1)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 1, rc);

        uPt = pu64Pt[(GCPtr >> 30) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 1);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (1G page). */
            pWalk->GCPtr = GCPtr;
            pWalk->fSucceeded = true;
            pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xffffc0000000)) | (GCPtr & (RTGCPTR)(_1G - 1));
            pWalk->fGigantPage = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 2)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 2, rc);

        uPt = pu64Pt[(GCPtr >> 21) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 2);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (2M page). */
            pWalk->GCPtr = GCPtr;
            pWalk->fSucceeded = true;
            pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xffffffe00000)) | (GCPtr & (RTGCPTR)(_2M - 1));
            pWalk->fBigPage = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    Assert(uLookupLvl <= 3);

    /* Next level. */
    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
    if (RT_SUCCESS(rc)) { /* probable */ }
    else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 3, rc);

    uPt = pu64Pt[(GCPtr & UINT64_C(0x1ff000)) >> 12];
    if (uPt & RT_BIT_64(0)) { /* probable */ }
    else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 3);

    if (uPt & RT_BIT_64(1)) { /* probable */ }
    else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 3); /** No block descriptors. */

    pWalk->GCPtr = GCPtr;
    pWalk->fSucceeded = true;
    pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000)) | (GCPtr & (RTGCPTR)(_4K - 1));
    return VINF_SUCCESS;
}
949
950
951VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
952{
953 AssertReleaseFailed();
954 RT_NOREF(pVCpu, GCPtr, fOpFlags);
955 return VERR_NOT_IMPLEMENTED;
956}
957
958
959VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
960{
961 AssertReleaseFailed();
962 RT_NOREF(pVCpu, GCPtr, fOpFlags);
963 return VERR_NOT_IMPLEMENTED;
964}
965
966
967VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
968{
969 AssertReleaseFailed();
970 RT_NOREF(pVCpu, GCPtr, fOpFlags);
971 return VERR_NOT_IMPLEMENTED;
972}
973
974
/**
 * Guest paging mode change notification — a no-op stub on ARMv8, kept because
 * the PGM saved state code calls it (see the commented-out assertion below).
 *
 * @returns VINF_SUCCESS.
 */
VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
{
    //AssertReleaseFailed(); /** @todo Called by the PGM saved state code. */
    RT_NOREF(pVM, pVCpu, enmGuestMode, fForce);
    return VINF_SUCCESS;
}
981
982
983VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
984{
985 AssertReleaseFailed();
986 RT_NOREF(pVCpu, GCPtr, pfFlags, pHCPhys);
987 return VERR_NOT_SUPPORTED;
988}
989
990
991int pgmR3ExitShadowModeBeforePoolFlush(PVMCPU pVCpu)
992{
993 RT_NOREF(pVCpu);
994 return VINF_SUCCESS;
995}
996
997
998int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
999{
1000 RT_NOREF(pVM, pVCpu);
1001 return VINF_SUCCESS;
1002}
1003
1004
1005void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu)
1006{
1007 RT_NOREF(pVCpu);
1008}
1009
1010
/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code from PGMGstGetPage.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest virtual address to walk.
 * @param   pWalk       Where to return the common walk result.
 * @param   pGstWalk    Extended walk info — not produced on ARMv8 yet.
 */
int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    RT_NOREF(pGstWalk);
    return PGMGstGetPage(pVCpu, GCPtr, pWalk);
}
1017
1018
/**
 * Walks the next page in a guest page table walk.
 *
 * @returns VBox status code from pgmGstPtWalk.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest virtual address to walk.
 * @param   pWalk       Where to return the common walk result.
 * @param   pGstWalk    Extended walk info (unused, see pgmGstPtWalk).
 */
int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk); /** @todo Always do full walk for now. */
}
1024
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette