VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGM-armv8.cpp@ 107307

Last change on this file since 107307 was 107194, checked in by vboxsync, 2 months ago

VMM: More adjustments for VBOX_WITH_ONLY_PGM_NEM_MODE, VBOX_WITH_MINIMAL_R0, VBOX_WITH_HWVIRT and such. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.6 KB
Line 
1/* $Id: PGM-armv8.cpp 107194 2024-11-29 14:47:06Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, ARMv8 variant. (Mixing stuff here, not good?)
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_pgm_armv8 PGM - The Page Manager and Monitor (ARMv8 variant)
30 *
31 * For now this is just a stub for bringing up the ARMv8 hypervisor. We'll see how
32 * much we really need here later on and whether it makes sense to merge this with the original PGM.cpp
33 * (avoiding \#ifdef hell with this, as I'm not confident enough to fiddle around with PGM too much at this point).
34 */
35
36
37/*********************************************************************************************************************************
38* Header Files *
39*********************************************************************************************************************************/
40#define LOG_GROUP LOG_GROUP_PGM
41#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
42#include <VBox/vmm/dbgf.h>
43#include <VBox/vmm/pgm.h>
44#include <VBox/vmm/cpum.h>
45#include <VBox/vmm/cpum-armv8.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/sup.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/em.h>
50#include <VBox/vmm/stam.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/hm.h>
54#include "PGMInternal.h"
55#include <VBox/vmm/vmcc.h>
56#include <VBox/vmm/uvm.h>
57#include "PGMInline.h"
58
59#include <VBox/dbg.h>
60#include <VBox/param.h>
61#include <VBox/err.h>
62
63#include <iprt/asm.h>
64#include <iprt/assert.h>
65#include <iprt/env.h>
66#include <iprt/file.h>
67#include <iprt/mem.h>
68#include <iprt/rand.h>
69#include <iprt/string.h>
70#include <iprt/thread.h>
71
72
73/*********************************************************************************************************************************
74* Internal Functions *
75*********************************************************************************************************************************/
76#ifdef VBOX_STRICT
77static FNVMATSTATE pgmR3ResetNoMorePhysWritesFlag;
78#endif
79
80
81/*********************************************************************************************************************************
82* Global Variables *
83*********************************************************************************************************************************/
84#ifndef VBOX_WITH_PGM_NEM_MODE
85# error "This requires VBOX_WITH_PGM_NEM_MODE to be set at all times!"
86#endif
87
/**
 * Interface that NEM uses to switch PGM into simplified memory management mode.
 *
 * This call occurs before PGMR3Init.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3EnableNemMode(PVM pVM)
{
    /* Must be called before the PGM critical section exists, i.e. before PGMR3Init. */
    AssertFatal(!PDMCritSectIsInitialized(&pVM->pgm.s.CritSectX));
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pVM->pgm.s.fNemMode = true; /* in NEM-only builds the flag doesn't exist; the mode is implied */
#endif
}
102
103
/**
 * Checks whether the simplified memory management mode for NEM is enabled.
 *
 * @returns true if enabled, false if not.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(bool) PGMR3IsNemModeEnabled(PVM pVM)
{
    RT_NOREF(pVM); /* pVM may be unused when the mode is fixed at compile time */
    return PGM_IS_IN_NEM_MODE(pVM);
}
115
116
/**
 * Initializes the paging part of the VM (ARMv8 variant).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Init(PVM pVM)
{
    LogFlow(("PGMR3Init:\n"));

    /*
     * Assert alignment and sizes (PGM data must fit in the padding reserved
     * for it in the shared VM/VMCPU structures).
     */
    AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
    AssertCompile(sizeof(pVM->apCpusR3[0]->pgm.s) <= sizeof(pVM->apCpusR3[0]->pgm.padding));
    AssertCompileMemberAlignment(PGM, CritSectX, sizeof(uintptr_t));

    /* Driverless = no support driver / ring-0; several resources must then be
       faked up in ring-3 further down. */
    bool const fDriverless = SUPR3IsDriverless();

    /*
     * Init the structure.
     */
    /*pVM->pgm.s.fRestoreRomPagesAtReset = false;*/

    /* We always use the simplified memory mode on arm. */
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pVM->pgm.s.fNemMode = true;
#endif

    /* Mark all handy page descriptors as unallocated. */
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
    {
        pVM->pgm.s.aHandyPages[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aHandyPages[i].fZeroed      = false;
        pVM->pgm.s.aHandyPages[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
    }

    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLargeHandyPage); i++)
    {
        pVM->pgm.s.aLargeHandyPage[i].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
        pVM->pgm.s.aLargeHandyPage[i].fZeroed      = false;
        pVM->pgm.s.aLargeHandyPage[i].idPage       = NIL_GMM_PAGEID;
        pVM->pgm.s.aLargeHandyPage[i].idSharedPage = NIL_GMM_PAGEID;
    }

    /* Pre-fill the physical handler type table with invalid entries whose
       handles carry random upper bits, making stale/guessed handles unlikely
       to match.  Must run before any type registration, hence the assertion. */
    AssertReleaseReturn(pVM->pgm.s.cPhysHandlerTypes == 0, VERR_WRONG_ORDER);
    for (size_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aPhysHandlerTypes); i++)
    {
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
        if (fDriverless) /* with ring-0 present, hType is presumably filled in by ring-0 init -- TODO confirm */
#endif
            pVM->pgm.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
        pVM->pgm.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
        pVM->pgm.s.aPhysHandlerTypes[i].pfnHandler = pgmR3HandlerPhysicalHandlerInvalid;
    }

#if 0
    /* Init the per-CPU part. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];
        PPGMCPU pPGM = &pVCpu->pgm.s;
    }
#endif

    /*
     * Read the configuration.
     */
    PCFGMNODE const pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");

    /** @todo RamPreAlloc doesn't work for NEM-mode. */
    /* NOTE(review): queried from the CFGM root, not /PGM, unlike the keys below. */
    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
#ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
                                true
#else
                                false
#endif
                                );
    AssertLogRelRCReturn(rc, rc);

#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    /* The ring-3 chunk mapping TLB is only used when PGM manages memory itself. */
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
    for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
        pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
#endif

    /*
     * Get the configured RAM size - to estimate saved state size.
     */
    uint64_t cbRam;
    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;          /* no RAM configured at all */
    else if (RT_SUCCESS(rc))
    {
        /* Anything below one guest page counts as zero; otherwise round up to a page. */
        if (cbRam < GUEST_PAGE_SIZE)
            cbRam = 0;
        cbRam = RT_ALIGN_64(cbRam, GUEST_PAGE_SIZE);
    }
    else
    {
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
        return rc;
    }

    /** @cfgm{/PGM/ZeroRamPagesOnReset, boolean, true}
     * Whether to clear RAM pages on (hard) reset. */
    rc = CFGMR3QueryBoolDef(pCfgPGM, "ZeroRamPagesOnReset", &pVM->pgm.s.fZeroRamPagesOnReset, true);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Register callbacks, string formatters and the saved state data unit.
     */
#ifdef VBOX_STRICT
    VMR3AtStateRegister(pVM->pUVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
#endif
    PGMRegisterStringFormatTypes();

    rc = pgmR3InitSavedState(pVM, cbRam);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the PGM critical section and flush the phys TLBs
     */
    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSectX, RT_SRC_POS, "PGM");
    AssertRCReturn(rc, rc);

#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    pgmR3PhysChunkInvalidateTLB(pVM, false /*fInRendezvous*/); /* includes pgmPhysInvalidatePageMapTLB call */
#endif

    /*
     * For the time being we sport a full set of handy pages in addition to the base
     * memory to simplify things.
     */
    rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
    AssertRCReturn(rc, rc);

    /*
     * Setup the zero page (HCPHysZeroPg is set by ring-0).
     */
    RT_ZERO(pVM->pgm.s.abZeroPg); /* paranoia */
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (fDriverless)
        pVM->pgm.s.HCPhysZeroPg = _4G - GUEST_PAGE_SIZE * 2 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysZeroPg != 0);
#endif

    /*
     * Setup the invalid MMIO page (HCPhysMmioPg is set by ring-0).
     * (The invalid bits in HCPhysInvMmioPg are set later on init complete.)
     */
    ASMMemFill32(pVM->pgm.s.abMmioPg, sizeof(pVM->pgm.s.abMmioPg), 0xfeedface);
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    if (fDriverless)
        pVM->pgm.s.HCPhysMmioPg = _4G - GUEST_PAGE_SIZE * 3 /* fake to avoid PGM_PAGE_INIT_ZERO assertion */;
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
    AssertRelease(pVM->pgm.s.HCPhysMmioPg != 0);
    pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
#endif

    /*
     * Initialize physical access handlers.
     */
    /** @cfgm{/PGM/MaxPhysicalAccessHandlers, uint32_t, 32, 65536, 6144}
     * Number of physical access handlers allowed (subject to rounding). This is
     * managed as one time allocation during initializations. The default is
     * lower for a driverless setup. */
    /** @todo can lower it for nested paging too, at least when there is no
     *        nested guest involved. */
    uint32_t cAccessHandlers = 0;
    rc = CFGMR3QueryU32Def(pCfgPGM, "MaxPhysicalAccessHandlers", &cAccessHandlers, !fDriverless ? 6144 : 640);
    AssertLogRelRCReturn(rc, rc);
    /* Clamp to the documented [32, 64K] range instead of failing. */
    AssertLogRelMsgStmt(cAccessHandlers >= 32, ("cAccessHandlers=%#x, min 32\n", cAccessHandlers), cAccessHandlers = 32);
    AssertLogRelMsgStmt(cAccessHandlers <= _64K, ("cAccessHandlers=%#x, max 65536\n", cAccessHandlers), cAccessHandlers = _64K);
#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
    if (!fDriverless)
    {
        /* Let ring-0 allocate the handler tree and slab allocator. */
        rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_HANDLER_INIT, cAccessHandlers, NULL);
        AssertRCReturn(rc, rc);
        AssertPtr(pVM->pgm.s.pPhysHandlerTree);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_paNodes);
        AssertPtr(pVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc);
    }
    else
#endif
    {
        /* Driverless: allocate tree + alloc bitmap + handler nodes as one
           page-aligned block and wire them up ourselves. */
        uint32_t cbTreeAndBitmap = 0;
        uint32_t const cbTotalAligned = pgmHandlerPhysicalCalcTableSizes(&cAccessHandlers, &cbTreeAndBitmap);
        uint8_t *pb = NULL;
        rc = SUPR3PageAlloc(cbTotalAligned >> HOST_PAGE_SHIFT, 0, (void **)&pb);
        AssertLogRelRCReturn(rc, rc);

        pVM->pgm.s.PhysHandlerAllocator.initSlabAllocator(cAccessHandlers, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
                                                          (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
        pVM->pgm.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
        pVM->pgm.s.pPhysHandlerTree->initWithAllocator(&pVM->pgm.s.PhysHandlerAllocator);
    }

    /*
     * Register the physical access handler protecting ROMs.
     */
    if (RT_SUCCESS(rc))
        /** @todo why isn't pgmPhysRomWriteHandler registered for ring-0? */
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/, pgmPhysRomWriteHandler,
                                              "ROM write protection", &pVM->pgm.s.hRomPhysHandlerType);

    /*
     * Register the physical access handler doing dirty MMIO2 tracing.
     */
    if (RT_SUCCESS(rc))
        rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
                                              pgmPhysMmio2WriteHandler, "MMIO2 dirty page tracing",
                                              &pVM->pgm.s.hMmio2DirtyPhysHandlerType);

    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Almost no cleanup necessary, MM frees all memory. */
    PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);

    return rc;
}
343
344
345/**
346 * Ring-3 init finalizing (not required here).
347 *
348 * @returns VBox status code.
349 * @param pVM The cross context VM structure.
350 */
351VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
352{
353 RT_NOREF(pVM);
354 int rc = VINF_SUCCESS;
355#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
356 if (pVM->pgm.s.fRamPreAlloc)
357 rc = pgmR3PhysRamPreAllocate(pVM);
358#endif
359
360 //pgmLogState(pVM);
361 LogRel(("PGM: PGMR3InitFinalize done: %Rrc\n", rc));
362 return rc;
363}
364
365
366/**
367 * Init phase completed callback.
368 *
369 * @returns VBox status code.
370 * @param pVM The cross context VM structure.
371 * @param enmWhat What has been completed.
372 * @thread EMT(0)
373 */
374VMMR3_INT_DECL(int) PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
375{
376 switch (enmWhat)
377 {
378 case VMINITCOMPLETED_HM:
379 AssertLogRelReturn(!pVM->pgm.s.fPciPassthrough, VERR_PGM_PCI_PASSTHRU_MISCONFIG);
380 break;
381
382 default:
383 /* shut up gcc */
384 break;
385 }
386
387 return VINF_SUCCESS;
388}
389
390
391/**
392 * Applies relocations to data and code managed by this component.
393 *
394 * This function will be called at init and whenever the VMM need to relocate it
395 * self inside the GC.
396 *
397 * @param pVM The cross context VM structure.
398 * @param offDelta Relocation delta relative to old location.
399 */
400VMMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
401{
402 LogFlow(("PGMR3Relocate: offDelta=%RGv\n", offDelta));
403 RT_NOREF(pVM, offDelta);
404}
405
406
407/**
408 * Resets a virtual CPU when unplugged.
409 *
410 * @param pVM The cross context VM structure.
411 * @param pVCpu The cross context virtual CPU structure.
412 */
413VMMR3DECL(void) PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
414{
415 RT_NOREF(pVM, pVCpu);
416}
417
418
/**
 * The VM is being reset.
 *
 * For the PGM component this means that any PD write monitors
 * needs to be removed.  (Currently nothing to do on ARMv8 beyond
 * dumping some state in debug builds.)
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3Reset(PVM pVM)
{
    LogFlow(("PGMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    /* Hold the PGM lock while dumping to keep the state consistent. */
    PGM_LOCK_VOID(pVM);

#ifdef DEBUG
    DBGFR3_INFO_LOG_SAFE(pVM, "mappings", NULL);
    DBGFR3_INFO_LOG_SAFE(pVM, "handlers", "all nostat");
#endif

    //pgmLogState(pVM);
    PGM_UNLOCK(pVM);
}
442
443
444/**
445 * Memory setup after VM construction or reset.
446 *
447 * @param pVM The cross context VM structure.
448 * @param fAtReset Indicates the context, after reset if @c true or after
449 * construction if @c false.
450 */
451VMMR3_INT_DECL(void) PGMR3MemSetup(PVM pVM, bool fAtReset)
452{
453 if (fAtReset)
454 {
455 PGM_LOCK_VOID(pVM);
456
457 int rc = pgmR3PhysRamZeroAll(pVM);
458 AssertReleaseRC(rc);
459
460 rc = pgmR3PhysRomReset(pVM);
461 AssertReleaseRC(rc);
462
463 PGM_UNLOCK(pVM);
464 }
465}
466
467
#ifdef VBOX_STRICT
/**
 * VM state change callback for clearing fNoMorePhysWrites after
 * a snapshot has been created.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PUVM pUVM, PCVMMR3VTABLE pVMM, VMSTATE enmState,
                                                         VMSTATE enmOldState, void *pvUser)
{
    RT_NOREF(pVMM, enmOldState, pvUser);
    switch (enmState)
    {
        case VMSTATE_RUNNING:
        case VMSTATE_RESUMING:
            pUVM->pVM->pgm.s.fNoMorePhysWrites = false;
            break;
        default:
            break;
    }
}
#endif
482
/**
 * Private API to reset fNoMorePhysWrites.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) PGMR3ResetNoMorePhysWritesFlag(PVM pVM)
{
    pVM->pgm.s.fNoMorePhysWrites = false;
}
490
/**
 * Terminates the PGM.
 *
 * Tears down RAM and ROM range bookkeeping, deregisters the string format
 * types and deletes the PGM critical section.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) PGMR3Term(PVM pVM)
{
    /* Must free shared pages here. */
    PGM_LOCK_VOID(pVM);
    pgmR3PhysRamTerm(pVM);
    pgmR3PhysRomTerm(pVM);
    PGM_UNLOCK(pVM);

    PGMDeregisterStringFormatTypes();
    return PDMR3CritSectDelete(pVM, &pVM->pgm.s.CritSectX);
}
508
509
510/**
511 * Perform an integrity check on the PGM component.
512 *
513 * @returns VINF_SUCCESS if everything is fine.
514 * @returns VBox error status after asserting on integrity breach.
515 * @param pVM The cross context VM structure.
516 */
517VMMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
518{
519 RT_NOREF(pVM);
520 return VINF_SUCCESS;
521}
522
523
/**
 * Checks whether the page pool has any dirty pages.
 *
 * @returns true if dirty pages are pending, false otherwise (always false in
 *          NEM-only builds, which have no page pool).
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
{
#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
#else
    RT_NOREF(pVM);
    return false;
#endif
}
533
534
/**
 * Checks if the calling thread owns the PGM lock.
 *
 * @returns true if owner, false if not.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
{
    return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
}
539
540
/**
 * Enables or disables large page usage.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   fUseLargePages  Whether large pages should be used.
 */
VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    pVM->pgm.s.fUseLargePages = fUseLargePages;
    return VINF_SUCCESS;
}
548
549
/**
 * Acquires the PGM lock.
 *
 * In strict builds the function is compiled as pgmLockDebug and records the
 * caller's source position; a pgmLock macro elsewhere presumably maps calls
 * accordingly -- TODO confirm.
 *
 * @returns VBox status code from the critical section enter.
 * @param   pVM     The cross context VM structure.
 * @param   fVoid   Set if the caller cannot handle failure; a failed enter is
 *                  then escalated via PDM_CRITSECT_RELEASE_ASSERT_RC instead
 *                  of a plain AssertRC.
 */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
#else
int pgmLock(PVMCC pVM, bool fVoid)
#endif
{
#if defined(VBOX_STRICT)
    int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
#else
    int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
#endif
    if (RT_SUCCESS(rc))
        return rc;
    if (fVoid)
        PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
    else
        AssertRC(rc);
    return rc;
}
569
570
/**
 * Releases the PGM lock.
 *
 * The deprecated-page-lock counter is cleared on a real (outermost) leave; if
 * the leave turns out to be nested (VINF_SEM_NESTED) the saved count is put
 * back, so it is only reset when the lock is actually dropped.
 *
 * @param   pVM     The cross context VM structure.
 */
void pgmUnlock(PVMCC pVM)
{
    uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
    pVM->pgm.s.cDeprecatedPageLocks = 0;
    int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
    if (rc == VINF_SEM_NESTED)
        pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
}
579
580
581#if !defined(IN_R0) || defined(LOG_ENABLED)
582
/** Format handler for PGMPAGE (%R[pgmpage]).
 *
 * Emits a compact state string; the format precision selects which parts are
 * included (see IS_PART_INCLUDED below).
 *
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                     const char *pszType, void const *pvValue,
                                                     int cchWidth, int cchPrecision, unsigned fFlags,
                                                     void *pvUser)
{
    size_t cch;
    PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
    if (RT_VALID_PTR(pPage))
    {
        char szTmp[64+80];

        cch = 0;

        /* The single char state stuff. */
        static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

        /* Part (lvl) is included when no precision is given, or precision selects it. */
# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
        if (IS_PART_INCLUDED(5))
        {
            /* Physical handler state; upper-case variants additionally flag "not in HM". */
            static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
            szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
                                              | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
        }

        /* The type. */
        if (IS_PART_INCLUDED(4))
        {
            szTmp[cch++] = ':';
            static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
        }

        /* The numbers. */
        if (IS_PART_INCLUDED(3))
        {
            /* Host physical address, 12 hex digits, zero padded. */
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
        }

        if (IS_PART_INCLUDED(2))
        {
            /* GMM page id, 7 hex digits. */
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
        }

        if (IS_PART_INCLUDED(6))
        {
            /* Tracking data: reference kind marker + index. */
            szTmp[cch++] = ':';
            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
        }
# undef IS_PART_INCLUDED

        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
    return cch;
}
649
650
651/** Format handler for PGMRAMRANGE.
652 * @copydoc FNRTSTRFORMATTYPE */
653static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
654 const char *pszType, void const *pvValue,
655 int cchWidth, int cchPrecision, unsigned fFlags,
656 void *pvUser)
657{
658 size_t cch;
659 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
660 if (RT_VALID_PTR(pRam))
661 {
662 char szTmp[80];
663 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
664 cch = pfnOutput(pvArgOutput, szTmp, cch);
665 }
666 else
667 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
668 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
669 return cch;
670}
671
/** Format type handlers to be registered/deregistered. */
static const struct
{
    /** The type name used with %R[...]. */
    char szType[24];
    /** The format handler callback. */
    PFNRTSTRFORMATTYPE pfnHandler;
} g_aPgmFormatTypes[] =
{
    { "pgmpage", pgmFormatTypeHandlerPage },
    { "pgmramrange", pgmFormatTypeHandlerRamRange }
};
682
683#endif /* !IN_R0 || LOG_ENABLED */
684
685
/**
 * Registers the PGM string format types (%R[pgmpage], %R[pgmramrange]).
 *
 * Called from PGMR3Init; rolls back already-registered entries on failure.
 *
 * @returns IPRT status code.
 */
VMMDECL(int) PGMRegisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
    int rc = VINF_SUCCESS;
    unsigned i;
    for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
    {
        rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
# ifdef IN_RING0
        if (rc == VERR_ALREADY_EXISTS)
        {
            /* in case of cleanup failure in ring-0 */
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
            rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
        }
# endif
    }
    /* Undo the successful registrations if a later one failed. */
    if (RT_FAILURE(rc))
        while (i-- > 0)
            RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);

    return rc;
#else
    return VINF_SUCCESS;
#endif
}
712
713
714VMMDECL(void) PGMDeregisterStringFormatTypes(void)
715{
716#if !defined(IN_R0) || defined(LOG_ENABLED)
717 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
718 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
719#endif
720}
721
722
/**
 * Modifies flags of a range of guest pages.
 *
 * Not implemented in the ARMv8 variant yet; release-asserts when reached.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   Guest virtual address of the first page.
 * @param   cb      Size (in bytes) of the range; must be non-zero.
 * @param   fFlags  Flags to set.
 * @param   fMask   Mask of flags to preserve.
 */
VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Validate input.
     */
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask);

    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}
739
740
741VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
742{
743 VMCPU_ASSERT_EMT(pVCpu);
744
745 bool fMmuEnabled = CPUMGetGuestMmuEnabled(pVCpu);
746 if (!fMmuEnabled)
747 return PGMMODE_NONE;
748
749 CPUMMODE enmCpuMode = CPUMGetGuestMode(pVCpu);
750 return enmCpuMode == CPUMMODE_ARMV8_AARCH64
751 ? PGMMODE_VMSA_V8_64
752 : PGMMODE_VMSA_V8_32;
753}
754
755
756VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
757{
758 RT_NOREF(pVCpu);
759 return PGMMODE_NONE; /* NEM doesn't need any shadow paging. */
760}
761
762
763DECLINLINE(int) pgmGstWalkReturnNotPresent(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
764{
765 NOREF(pVCpu);
766 pWalk->fNotPresent = true;
767 pWalk->uLevel = uLevel;
768 pWalk->fFailed = PGM_WALKFAIL_NOT_PRESENT
769 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
770 return VERR_PAGE_TABLE_NOT_PRESENT;
771}
772
773DECLINLINE(int) pgmGstWalkReturnBadPhysAddr(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
774{
775 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
776 pWalk->fBadPhysAddr = true;
777 pWalk->uLevel = uLevel;
778 pWalk->fFailed = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
779 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
780 return VERR_PAGE_TABLE_NOT_PRESENT;
781}
782
783
784DECLINLINE(int) pgmGstWalkReturnRsvdError(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
785{
786 NOREF(pVCpu);
787 pWalk->fRsvdError = true;
788 pWalk->uLevel = uLevel;
789 pWalk->fFailed = PGM_WALKFAIL_RESERVED_BITS
790 | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
791 return VERR_PAGE_TABLE_NOT_PRESENT;
792}
793
794
/**
 * Performs a guest page-table walk for the given virtual address.
 *
 * @returns VBox status code; VINF_SUCCESS with @a pWalk filled in on success,
 *          VERR_PAGE_TABLE_NOT_PRESENT (with failure details in @a pWalk) on
 *          any walk failure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPtr   The guest virtual address to translate.
 * @param   pWalk   Where to return the walk result.
 */
VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(pWalk);

    pWalk->fSucceeded = false;

    RTGCPHYS GCPhysPt = CPUMGetEffectiveTtbr(pVCpu, GCPtr);
    if (GCPhysPt == RTGCPHYS_MAX) /* MMU disabled? */
    {
        /* Identity mapping: VA == PA. */
        pWalk->GCPtr = GCPtr;
        pWalk->fSucceeded = true;
        pWalk->GCPhys = GCPtr;
        return VINF_SUCCESS;
    }

    /* Do the translation. */
    /** @todo This is just a sketch to get something working for debugging, assumes 4KiB granules and 48-bit output address.
     *        Needs to be moved to PGMAllGst like on x86 and implemented for 16KiB and 64KiB granule sizes. */
    uint64_t u64TcrEl1 = CPUMGetTcrEl1(pVCpu);
    /* VA bit 55 selects the TTBR1 (upper) vs TTBR0 (lower) region and with it
       the matching TxSZ field. */
    uint8_t u8TxSz = (GCPtr & RT_BIT_64(55))
                   ? ARMV8_TCR_EL1_AARCH64_T1SZ_GET(u64TcrEl1)
                   : ARMV8_TCR_EL1_AARCH64_T0SZ_GET(u64TcrEl1);
    uint8_t uLookupLvl;     /* Initial lookup level derived from TxSZ. */
    RTGCPHYS fLookupMask;   /* Index mask for the initial (possibly truncated) table. */

    /*
     * From: https://github.com/codingbelief/arm-architecture-reference-manual-for-armv8-a/blob/master/en/chapter_d4/d42_2_controlling_address_translation_stages.md
     * For all translation stages
     * The maximum TxSZ value is 39. If TxSZ is programmed to a value larger than 39 then it is IMPLEMENTATION DEFINED whether:
     *     - The implementation behaves as if the field is programmed to 39 for all purposes other than reading back the value of the field.
     *     - Any use of the TxSZ value generates a Level 0 Translation fault for the stage of translation at which TxSZ is used.
     *
     * For a stage 1 translation
     * The minimum TxSZ value is 16. If TxSZ is programmed to a value smaller than 16 then it is IMPLEMENTATION DEFINED whether:
     *     - The implementation behaves as if the field were programmed to 16 for all purposes other than reading back the value of the field.
     *     - Any use of the TxSZ value generates a stage 1 Level 0 Translation fault.
     *
     * We currently choose the former for both.
     */
    if (/*u8TxSz >= 16 &&*/ u8TxSz <= 24)
    {
        uLookupLvl = 0;
        fLookupMask = RT_BIT_64(24 - u8TxSz + 1) - 1;
    }
    else if (u8TxSz >= 25 && u8TxSz <= 33)
    {
        uLookupLvl = 1;
        fLookupMask = RT_BIT_64(33 - u8TxSz + 1) - 1;
    }
    else /*if (u8TxSz >= 34 && u8TxSz <= 39)*/
    {
        uLookupLvl = 2;
        fLookupMask = RT_BIT_64(39 - u8TxSz + 1) - 1;
    }
    /*else
        return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);*/ /** @todo Better status (Invalid TCR config). */

    /* Walk the table levels.  For each descriptor: bit 0 = valid, bit 1 set =
       table (next level), clear = block mapping (at levels 1 and 2). */
    uint64_t *pu64Pt = NULL;
    uint64_t uPt;
    int rc;
    if (uLookupLvl == 0)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, rc);

        uPt = pu64Pt[(GCPtr >> 39) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 0);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 0); /** @todo Only supported if TCR_EL1.DS is set. */

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 1)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 1, rc);

        uPt = pu64Pt[(GCPtr >> 30) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 1);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (1G page). */
            pWalk->GCPtr = GCPtr;
            pWalk->fSucceeded = true;
            pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xffffc0000000)) | (GCPtr & (RTGCPTR)(_1G - 1));
            pWalk->fGigantPage = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    if (uLookupLvl <= 2)
    {
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 2, rc);

        uPt = pu64Pt[(GCPtr >> 21) & fLookupMask];
        if (uPt & RT_BIT_64(0)) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 2);

        if (uPt & RT_BIT_64(1)) { /* probable */ }
        else
        {
            /* Block descriptor (2M page). */
            pWalk->GCPtr = GCPtr;
            pWalk->fSucceeded = true;
            pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xffffffe00000)) | (GCPtr & (RTGCPTR)(_2M - 1));
            pWalk->fBigPage = true;
            return VINF_SUCCESS;
        }

        /* All nine bits from now on. */
        fLookupMask = RT_BIT_64(9) - 1;
        GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
    }

    Assert(uLookupLvl <= 3);

    /* Next level. */
    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
    if (RT_SUCCESS(rc)) { /* probable */ }
    else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 3, rc);

    uPt = pu64Pt[(GCPtr & UINT64_C(0x1ff000)) >> 12];
    if (uPt & RT_BIT_64(0)) { /* probable */ }
    else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 3);

    if (uPt & RT_BIT_64(1)) { /* probable */ }
    else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 3); /* Level 3 must be a page descriptor, no block descriptors here. */

    /* 4KiB page descriptor. */
    pWalk->GCPtr = GCPtr;
    pWalk->fSucceeded = true;
    pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000)) | (GCPtr & (RTGCPTR)(_4K - 1));
    return VINF_SUCCESS;
}
945
946
947VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
948{
949 AssertReleaseFailed();
950 RT_NOREF(pVCpu, GCPtr, fOpFlags);
951 return VERR_NOT_IMPLEMENTED;
952}
953
954
955VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
956{
957 AssertReleaseFailed();
958 RT_NOREF(pVCpu, GCPtr, fOpFlags);
959 return VERR_NOT_IMPLEMENTED;
960}
961
962
963VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
964{
965 AssertReleaseFailed();
966 RT_NOREF(pVCpu, GCPtr, fOpFlags);
967 return VERR_NOT_IMPLEMENTED;
968}
969
970
/**
 * Guest paging mode change notification; a no-op in the ARMv8 variant.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmGuestMode    The new guest mode (ignored).
 * @param   fForce          Whether to force the change (ignored).
 */
VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
{
    //AssertReleaseFailed(); /** @todo Called by the PGM saved state code. */
    RT_NOREF(pVM, pVCpu, enmGuestMode, fForce);
    return VINF_SUCCESS;
}
977
978
979VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
980{
981 AssertReleaseFailed();
982 RT_NOREF(pVCpu, GCPtr, pfFlags, pHCPhys);
983 return VERR_NOT_SUPPORTED;
984}
985
986
987int pgmR3ExitShadowModeBeforePoolFlush(PVMCPU pVCpu)
988{
989 RT_NOREF(pVCpu);
990 return VINF_SUCCESS;
991}
992
993
994int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu)
995{
996 RT_NOREF(pVM, pVCpu);
997 return VINF_SUCCESS;
998}
999
1000
1001void pgmR3RefreshShadowModeAfterA20Change(PVMCPU pVCpu)
1002{
1003 RT_NOREF(pVCpu);
1004}
1005
1006
1007int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1008{
1009 VMCPU_ASSERT_EMT(pVCpu);
1010 RT_NOREF(pGstWalk);
1011 return PGMGstGetPage(pVCpu, GCPtr, pWalk);
1012}
1013
1014
/** Continues a guest page table walk at the next page; currently just redoes
 *  a full walk via pgmGstPtWalk. */
int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk); /** @todo Always do full walk for now. */
}
1020
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette