VirtualBox

source: vbox/trunk/include/VBox/vmm/pgm.h@108844

Last change on this file since 108844 was 108844, checked in by vboxsync, 2 weeks ago

VMM/PGM,NEM: Some early page table management infrastructure for ARMv8, bugref:10388 [build fix]

/** @file
 * PGM - Page Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */

#ifndef VBOX_INCLUDED_vmm_pgm_h
#define VBOX_INCLUDED_vmm_pgm_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/types.h>
#include <VBox/sup.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/gmm.h> /* for PGMMREGISTERSHAREDMODULEREQ */
#include <VBox/vmm/hm_vmx.h>
#include <iprt/x86.h>
#include <VBox/param.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_pgm The Page Monitor / Manager API
 * @ingroup grp_vmm
 * @{
 */

/**
 * FNPGMRELOCATE callback mode.
 */
typedef enum PGMRELOCATECALL
{
    /** The callback is for checking if the suggested address is suitable. */
    PGMRELOCATECALL_SUGGEST = 1,
    /** The callback is for executing the relocation. */
    PGMRELOCATECALL_RELOCATE
} PGMRELOCATECALL;


/**
 * Callback function which will be called when PGM is trying to find
 * a new location for the mapping.
 *
 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
 * In 1) the callback should say if it objects to a suggested new location. If it
 * accepts the new location, it is called again for doing its relocation.
 *
 *
 * @returns true if the location is ok.
 * @returns false if another location should be found.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtrOld    The old virtual address.
 * @param   GCPtrNew    The new virtual address.
 * @param   enmMode     Used to indicate the callback mode.
 * @param   pvUser      User argument.
 * @remark  The return value is not a failure indicator, it's an acceptance
 *          indicator. Relocation cannot fail!
 */
typedef DECLCALLBACKTYPE(bool, FNPGMRELOCATE,(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser));
/** Pointer to a relocation callback function. */
typedef FNPGMRELOCATE *PFNPGMRELOCATE;

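/*
 * Illustrative sketch (not part of the original header; the myXxx names are
 * made up): a minimal FNPGMRELOCATE implementation. In SUGGEST mode the
 * callback only vets the proposed address; in RELOCATE mode it must commit
 * its own bookkeeping and may not fail.
 */
#if 0
static DECLCALLBACK(bool) myRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                             PGMRELOCATECALL enmMode, void *pvUser)
{
    RTGCPTR *pGCPtrMapping = (RTGCPTR *)pvUser; /* hypothetical user data */
    RT_NOREF(pVM, GCPtrOld);
    switch (enmMode)
    {
        case PGMRELOCATECALL_SUGGEST:
            /* Accept any page-aligned suggestion. */
            return !(GCPtrNew & X86_PAGE_OFFSET_MASK);
        case PGMRELOCATECALL_RELOCATE:
            /* Commit the new location; relocation cannot fail. */
            *pGCPtrMapping = GCPtrNew;
            return true;
        default:
            return false;
    }
}
#endif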

/**
 * Memory access origin.
 */
typedef enum PGMACCESSORIGIN
{
    /** Invalid zero value. */
    PGMACCESSORIGIN_INVALID = 0,
    /** IEM is accessing memory. */
    PGMACCESSORIGIN_IEM,
    /** HM is accessing memory. */
    PGMACCESSORIGIN_HM,
    /** Some device is accessing memory. */
    PGMACCESSORIGIN_DEVICE,
    /** Someone debugging is accessing memory. */
    PGMACCESSORIGIN_DEBUGGER,
    /** SELM is accessing memory. */
    PGMACCESSORIGIN_SELM,
    /** FTM is accessing memory. */
    PGMACCESSORIGIN_FTM,
    /** REM is accessing memory. */
    PGMACCESSORIGIN_REM,
    /** IOM is accessing memory. */
    PGMACCESSORIGIN_IOM,
    /** End of valid values. */
    PGMACCESSORIGIN_END,
    /** Type size hack. */
    PGMACCESSORIGIN_32BIT_HACK = 0x7fffffff
} PGMACCESSORIGIN;


/**
 * Physical page access handler kind.
 */
typedef enum PGMPHYSHANDLERKIND
{
    /** Invalid zero value. */
    PGMPHYSHANDLERKIND_INVALID = 0,
    /** MMIO range. Pages are not present, all access is done in interpreter or recompiler. */
    PGMPHYSHANDLERKIND_MMIO,
    /** Handles all write access to a physical page range. */
    PGMPHYSHANDLERKIND_WRITE,
    /** Handles all access to a physical page range. */
    PGMPHYSHANDLERKIND_ALL,
    /** End of the valid values. */
    PGMPHYSHANDLERKIND_END,
    /** Type size hack. */
    PGMPHYSHANDLERKIND_32BIT_HACK = 0x7fffffff
} PGMPHYSHANDLERKIND;

/**
 * Guest access type.
 */
typedef enum PGMACCESSTYPE
{
    /** Read access. */
    PGMACCESSTYPE_READ = 1,
    /** Write access. */
    PGMACCESSTYPE_WRITE
} PGMACCESSTYPE;


/** @def PGM_ALL_CB_DECL
 * Macro for declaring a handler callback for all contexts. The handler
 * callback is static in ring-3, and exported in RC and R0.
 * @sa PGM_ALL_CB2_DECL.
 */
#if defined(IN_RC) || defined(IN_RING0)
# ifdef __cplusplus
#  define PGM_ALL_CB_DECL(type)     extern "C" DECLCALLBACK(DECLEXPORT(type))
# else
#  define PGM_ALL_CB_DECL(type)     DECLCALLBACK(DECLEXPORT(type))
# endif
#else
# define PGM_ALL_CB_DECL(type)      static DECLCALLBACK(type)
#endif

/** @def PGM_ALL_CB2_DECL
 * Macro for declaring a handler callback for all contexts. The handler
 * callback is hidden in ring-3, and exported in RC and R0.
 * @sa PGM_ALL_CB_DECL.
 */
#if defined(IN_RC) || defined(IN_RING0)
# ifdef __cplusplus
#  define PGM_ALL_CB2_DECL(type)    extern "C" DECLCALLBACK(DECLEXPORT(type))
# else
#  define PGM_ALL_CB2_DECL(type)    DECLCALLBACK(DECLEXPORT(type))
# endif
#else
# define PGM_ALL_CB2_DECL(type)     DECL_HIDDEN_CALLBACK(type)
#endif

/** @def PGM_ALL_CB2_PROTO
 * Macro for declaring a handler callback for all contexts. The handler
 * callback is hidden in ring-3, and exported in RC and R0.
 * @param   fnType      The callback function type.
 * @sa PGM_ALL_CB2_DECL.
 */
#if defined(IN_RC) || defined(IN_RING0)
# ifdef __cplusplus
#  define PGM_ALL_CB2_PROTO(fnType) extern "C" DECLEXPORT(fnType)
# else
#  define PGM_ALL_CB2_PROTO(fnType) DECLEXPORT(fnType)
# endif
#else
# define PGM_ALL_CB2_PROTO(fnType)  DECLHIDDEN(fnType)
#endif

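/*
 * Illustrative sketch: how a handler shared by all contexts is typically
 * prototyped with these macros (hidden in ring-3, exported in RC/R0).
 * FNPGMPHYSHANDLER is declared further down; myPhysHandler is a made-up name.
 */
#if 0
PGM_ALL_CB2_PROTO(FNPGMPHYSHANDLER) myPhysHandler;   /* prototype form */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) myPhysHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
                                             void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                             PGMACCESSORIGIN enmOrigin, uint64_t uUser);
#endif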

/**
 * \#PF Handler callback for physical access handler ranges in RC and R0.
 *
 * @returns Strict VBox status code (appropriate for ring-0 and raw-mode).
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode      CPU Error code.
 * @param   pCtx            Pointer to the register context for the CPU.
 * @param   pvFault         The fault address (cr2).
 * @param   GCPhysFault     The GC physical address corresponding to pvFault.
 * @param   uUser           User argument (not a pointer).
 * @thread  EMT(pVCpu)
 */
typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNPGMRZPHYSPFHANDLER,(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
                                                             RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser));
/** Pointer to PGM access callback. */
typedef FNPGMRZPHYSPFHANDLER *PFNPGMRZPHYSPFHANDLER;


/**
 * Access handler callback for physical access handler ranges.
 *
 * The handler cannot raise any faults, it's mainly for monitoring write access
 * to certain pages (like MMIO).
 *
 * @returns Strict VBox status code in ring-0 and raw-mode context, in ring-3
 *          the only supported informational status code is
 *          VINF_PGM_HANDLER_DO_DEFAULT.
 * @retval  VINF_SUCCESS if the handler has carried out the operation.
 * @retval  VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the
 *          access operation.
 * @retval  VINF_EM_XXX in ring-0 and raw-mode context.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   enmOrigin       The origin of this call.
 * @param   uUser           User argument (not a pointer).
 * @thread  EMT(pVCpu)
 */
typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNPGMPHYSHANDLER,(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
                                                         void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                         PGMACCESSORIGIN enmOrigin, uint64_t uUser));
/** Pointer to PGM access callback. */
typedef FNPGMPHYSHANDLER *PFNPGMPHYSHANDLER;

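/*
 * Illustrative sketch of a write handler (myXxx names are made up): it records
 * that the monitored page was touched and then lets PGM perform the actual
 * write by returning VINF_PGM_HANDLER_DO_DEFAULT.
 */
#if 0
static DECLCALLBACK(VBOXSTRICTRC) myWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
                                                 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                 PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, pvPhys, pvBuf, cbBuf, enmOrigin, GCPhys, uUser);
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); /* registered with PGMPHYSHANDLERKIND_WRITE */
    g_fMyPageDirty = true;                        /* hypothetical bookkeeping, keyed on uUser/GCPhys */
    return VINF_PGM_HANDLER_DO_DEFAULT;           /* the caller carries out the write */
}
#endif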

/** @todo r=aeichner This doesn't seem to be used outside of the VMM module, so we might make
 *        all APIs (PGMGetGuestMode(), etc.) internal and split this up into an
 *        x86 and arm specific header. */
/**
 * Paging mode.
 *
 * @note    Part of saved state. Change with extreme care.
 * @note    Due to PGMGetShadowMode() and the possibility that we will be
 *          running ARMv8 VMs on AMD64 hosts, it's safer to combine these
 *          modes. We could rethink this if we start using PGMMODE exclusively
 *          for the guest mode and come up with a different enum for the host.
 */
typedef enum PGMMODE
{
    /** The usual invalid value. */
    PGMMODE_INVALID = 0,

    /** @name X86
     * @{ */
    /** Real mode. */
    PGMMODE_REAL,
    /** Protected mode, no paging. */
    PGMMODE_PROTECTED,
    /** 32-bit paging. */
    PGMMODE_32_BIT,
    /** PAE paging. */
    PGMMODE_PAE,
    /** PAE paging with NX enabled. */
    PGMMODE_PAE_NX,
    /** 64-bit AMD paging (long mode). */
    PGMMODE_AMD64,
    /** 64-bit AMD paging (long mode) with NX enabled. */
    PGMMODE_AMD64_NX,
    /** 32-bit nested paging mode (shadow only; guest physical to host physical). */
    PGMMODE_NESTED_32BIT,
    /** PAE nested paging mode (shadow only; guest physical to host physical). */
    PGMMODE_NESTED_PAE,
    /** AMD64 nested paging mode (shadow only; guest physical to host physical). */
    PGMMODE_NESTED_AMD64,
    /** Extended paging (Intel) mode. */
    PGMMODE_EPT,
    /** @} */

    /** ARMv8: Paging is not enabled by the guest.
     *  AMD64 host: Special mode used by NEM to indicate no shadow paging
     *  necessary. Not used by X86 guests. */
    PGMMODE_NONE = 32,

    /** @name ARMv8
     * @{ */
    /** VMSAv8-32 Virtual Memory System Architecture v8 - 32-bit variant enabled. */
    PGMMODE_VMSA_V8_32,
    /** VMSAv8-64 Virtual Memory System Architecture v8 - 64-bit variant enabled. */
    PGMMODE_VMSA_V8_64,
    /** @} */

    /** The max number of modes. */
    PGMMODE_MAX,
    /** 32bit hackishness. */
    PGMMODE_32BIT_HACK = 0x7fffffff
} PGMMODE;


/**
 * Second level address translation (SLAT) mode.
 */
typedef enum PGMSLAT
{
    /** The usual invalid value. */
    PGMSLAT_INVALID = 0,
    /** No second level translation. */
    PGMSLAT_DIRECT,
    /** Intel Extended Page Tables (EPT). */
    PGMSLAT_EPT,
    /** AMD-V Nested Paging 32-bit. */
    PGMSLAT_32BIT,
    /** AMD-V Nested Paging PAE. */
    PGMSLAT_PAE,
    /** AMD-V Nested Paging 64-bit. */
    PGMSLAT_AMD64,
    /** 32bit hackishness. */
    PGMSLAT_32BIT_HACK = 0x7fffffff
} PGMSLAT;


/** @name PGMPTWALK::fFailed flags.
 * These flags indicate the type of a page-walk failure.
 * @{
 */
typedef uint32_t PGMWALKFAIL;
/** No fault. */
#define PGM_WALKFAIL_SUCCESS                    UINT32_C(0)

/** Not present (X86_TRAP_PF_P). */
#define PGM_WALKFAIL_NOT_PRESENT                RT_BIT_32(0)
/** Reserved bit set in table entry (X86_TRAP_PF_RSVD). */
#define PGM_WALKFAIL_RESERVED_BITS              RT_BIT_32(1)
/** Bad physical address (VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS). */
#define PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS       RT_BIT_32(2)

/** EPT violation - Intel. */
#define PGM_WALKFAIL_EPT_VIOLATION              RT_BIT_32(3)
/** EPT violation, convertible to \#VE exception - Intel. */
#define PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE  RT_BIT_32(4)
/** EPT misconfiguration - Intel. */
#define PGM_WALKFAIL_EPT_MISCONFIG              RT_BIT_32(5)
/** Mask of all EPT induced page-walk failures - Intel. */
#define PGM_WALKFAIL_EPT                        (  PGM_WALKFAIL_EPT_VIOLATION \
                                                 | PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE \
                                                 | PGM_WALKFAIL_EPT_MISCONFIG)

/** Access denied: Not writable (VERR_ACCESS_DENIED). */
#define PGM_WALKFAIL_NOT_WRITABLE               RT_BIT_32(6)
/** Access denied: Not executable (VERR_ACCESS_DENIED). */
#define PGM_WALKFAIL_NOT_EXECUTABLE             RT_BIT_32(7)
/** Access denied: Not user/supervisor mode accessible (VERR_ACCESS_DENIED). */
#define PGM_WALKFAIL_NOT_ACCESSIBLE_BY_MODE     RT_BIT_32(8)

/** The level the problem arose at.
 * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
 * level 8. This is 0 on success. */
#define PGM_WALKFAIL_LEVEL_MASK                 UINT32_C(0x0000f800)
/** Level shift (see PGM_WALKFAIL_LEVEL_MASK). */
#define PGM_WALKFAIL_LEVEL_SHIFT                11

/** @} */
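/*
 * Illustrative sketch: pulling the failing level out of a PGMWALKFAIL value.
 */
#if 0
static uint8_t myWalkFailLevel(PGMWALKFAIL fFailed)
{
    /* 0 on success, 1 = PTE, ..., 8 = CR3 (see PGM_WALKFAIL_LEVEL_MASK above). */
    return (uint8_t)((fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT);
}
#endif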


/** PGM page-table attributes.
 *
 * This is VirtualBox's combined page table attributes. This combines
 * attributes from the regular page/translation tables and the nested page
 * tables / stage 2 translation tables. */
typedef uint64_t PGMPTATTRS;
/** Pointer to a PGMPTATTRS type. */
typedef PGMPTATTRS *PPGMPTATTRS;

#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
/** @name PGM_PTATTRS_XXX - PGM page-table attributes, x86 edition.
 *
 * The following bits map 1:1 (shifted by PGM_PTATTRS_EPT_SHIFT) to the Intel EPT
 * attributes as these are unique to EPT and fit within 64-bits despite the shift:
 *   - EPT_R         : Read access.
 *   - EPT_W         : Write access.
 *   - EPT_X_SUPER   : Execute or execute for supervisor-mode linear addr access.
 *   - EPT_MEMTYPE   : EPT memory type.
 *   - EPT_IGNORE_PAT: Ignore PAT memory type.
 *   - EPT_X_USER    : Execute access for user-mode linear addresses.
 *
 * For regular page tables, the R bit is always 1 (same as P bit).
 * For Intel EPT, the EPT_R and EPT_W bits are copied to R and W bits respectively.
 *
 * The following EPT attributes are mapped to the following positions because they
 * exist in the regular page tables at these positions OR are exclusive to EPT and
 * have been mapped to arbitrarily chosen positions:
 *   - EPT_A               : Accessed (EPT bit 8 maps to bit 5).
 *   - EPT_D               : Dirty (EPT bit 9 maps to bit 6).
 *   - EPT_SUPER_SHW_STACK : Supervisor Shadow Stack (EPT bit 60 maps to bit 24).
 *   - EPT_SUPPRESS_VE_XCPT: Suppress \#VE exception (EPT bit 63 maps to bit 25).
 *
 * Bits 12, 11:9 and 43 are deliberately kept unused (correspond to bit PS and bits
 * 11:9 in the regular page-table structures and to bit 11 in the EPT structures
 * respectively) as bit 12 is the page-size bit and bits 11:9 are reserved for
 * use by software and we may want to use/preserve them in the future.
 *
 * @{ */
/** Read bit (always 1 for regular PT, copy of EPT_R for EPT). */
#define PGM_PTATTRS_R_SHIFT                 0
#define PGM_PTATTRS_R_MASK                  RT_BIT_64(PGM_PTATTRS_R_SHIFT)
/** Write access bit (aka read/write bit for regular PT). */
#define PGM_PTATTRS_W_SHIFT                 1
#define PGM_PTATTRS_W_MASK                  RT_BIT_64(PGM_PTATTRS_W_SHIFT)
/** User-mode access bit. */
#define PGM_PTATTRS_US_SHIFT                2
#define PGM_PTATTRS_US_MASK                 RT_BIT_64(PGM_PTATTRS_US_SHIFT)
/** Write through cache bit. */
#define PGM_PTATTRS_PWT_SHIFT               3
#define PGM_PTATTRS_PWT_MASK                RT_BIT_64(PGM_PTATTRS_PWT_SHIFT)
/** Cache disabled bit. */
#define PGM_PTATTRS_PCD_SHIFT               4
#define PGM_PTATTRS_PCD_MASK                RT_BIT_64(PGM_PTATTRS_PCD_SHIFT)
/** Accessed bit. */
#define PGM_PTATTRS_A_SHIFT                 5
#define PGM_PTATTRS_A_MASK                  RT_BIT_64(PGM_PTATTRS_A_SHIFT)
/** Dirty bit. */
#define PGM_PTATTRS_D_SHIFT                 6
#define PGM_PTATTRS_D_MASK                  RT_BIT_64(PGM_PTATTRS_D_SHIFT)
/** The PAT bit. */
#define PGM_PTATTRS_PAT_SHIFT               7
#define PGM_PTATTRS_PAT_MASK                RT_BIT_64(PGM_PTATTRS_PAT_SHIFT)
/** The global bit. */
#define PGM_PTATTRS_G_SHIFT                 8
#define PGM_PTATTRS_G_MASK                  RT_BIT_64(PGM_PTATTRS_G_SHIFT)
/** Reserved (bits 12:9) unused. */
#define PGM_PTATTRS_RSVD_12_9_SHIFT         9
#define PGM_PTATTRS_RSVD_12_9_MASK          UINT64_C(0x0000000000001e00)
/** Read access bit - EPT only. */
#define PGM_PTATTRS_EPT_R_SHIFT             13
#define PGM_PTATTRS_EPT_R_MASK              RT_BIT_64(PGM_PTATTRS_EPT_R_SHIFT)
/** Write access bit - EPT only. */
#define PGM_PTATTRS_EPT_W_SHIFT             14
#define PGM_PTATTRS_EPT_W_MASK              RT_BIT_64(PGM_PTATTRS_EPT_W_SHIFT)
/** Execute or execute access for supervisor-mode linear addresses - EPT only. */
#define PGM_PTATTRS_EPT_X_SUPER_SHIFT       15
#define PGM_PTATTRS_EPT_X_SUPER_MASK        RT_BIT_64(PGM_PTATTRS_EPT_X_SUPER_SHIFT)
/** EPT memory type - EPT only. */
#define PGM_PTATTRS_EPT_MEMTYPE_SHIFT       16
#define PGM_PTATTRS_EPT_MEMTYPE_MASK        UINT64_C(0x0000000000070000)
/** Ignore PAT memory type - EPT only. */
#define PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT    19
#define PGM_PTATTRS_EPT_IGNORE_PAT_MASK     RT_BIT_64(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT)
/** Leaf paging entry (big or regular) - EPT only. */
#define PGM_PTATTRS_EPT_LEAF_SHIFT          20
#define PGM_PTATTRS_EPT_LEAF_MASK           RT_BIT_64(PGM_PTATTRS_EPT_LEAF_SHIFT)
/** Accessed bit - EPT only. */
#define PGM_PTATTRS_EPT_A_SHIFT             21
#define PGM_PTATTRS_EPT_A_MASK              RT_BIT_64(PGM_PTATTRS_EPT_A_SHIFT)
/** Dirty bit - EPT only. */
#define PGM_PTATTRS_EPT_D_SHIFT             22
#define PGM_PTATTRS_EPT_D_MASK              RT_BIT_64(PGM_PTATTRS_EPT_D_SHIFT)
/** Execute access for user-mode linear addresses - EPT only. */
#define PGM_PTATTRS_EPT_X_USER_SHIFT        23
#define PGM_PTATTRS_EPT_X_USER_MASK         RT_BIT_64(PGM_PTATTRS_EPT_X_USER_SHIFT)
/** Reserved (bits 29:24) - unused. */
#define PGM_PTATTRS_RSVD_29_24_SHIFT        24
#define PGM_PTATTRS_RSVD_29_24_MASK         UINT64_C(0x000000003f000000)
/** Verify Guest Paging - EPT only. */
#define PGM_PTATTRS_EPT_VGP_SHIFT           30
#define PGM_PTATTRS_EPT_VGP_MASK            RT_BIT_64(PGM_PTATTRS_EPT_VGP_SHIFT)
/** Paging-write - EPT only. */
#define PGM_PTATTRS_EPT_PW_SHIFT            31
#define PGM_PTATTRS_EPT_PW_MASK             RT_BIT_64(PGM_PTATTRS_EPT_PW_SHIFT)
/** Reserved (bit 32) - unused. */
#define PGM_PTATTRS_RSVD_32_SHIFT           32
#define PGM_PTATTRS_RSVD_32_MASK            UINT64_C(0x0000000100000000)
/** Supervisor shadow stack - EPT only. */
#define PGM_PTATTRS_EPT_SSS_SHIFT           33
#define PGM_PTATTRS_EPT_SSS_MASK            RT_BIT_64(PGM_PTATTRS_EPT_SSS_SHIFT)
/** Sub-page write permission - EPT only. */
#define PGM_PTATTRS_EPT_SPP_SHIFT           34
#define PGM_PTATTRS_EPT_SPP_MASK            RT_BIT_64(PGM_PTATTRS_EPT_SPP_SHIFT)
/** Reserved (bit 35) - unused. */
#define PGM_PTATTRS_RSVD_35_SHIFT           35
#define PGM_PTATTRS_RSVD_35_MASK            UINT64_C(0x0000000800000000)
/** Suppress \#VE exception - EPT only. */
#define PGM_PTATTRS_EPT_SVE_SHIFT           36
#define PGM_PTATTRS_EPT_SVE_MASK            RT_BIT_64(PGM_PTATTRS_EPT_SVE_SHIFT)
/** Reserved (bits 62:37) - unused. */
#define PGM_PTATTRS_RSVD_62_37_SHIFT        37
#define PGM_PTATTRS_RSVD_62_37_MASK         UINT64_C(0x7fffffe000000000)
/** No-execute bit. */
#define PGM_PTATTRS_NX_SHIFT                63
#define PGM_PTATTRS_NX_MASK                 RT_BIT_64(PGM_PTATTRS_NX_SHIFT)

RT_BF_ASSERT_COMPILE_CHECKS(PGM_PTATTRS_, UINT64_C(0), UINT64_MAX,
                            (R, W, US, PWT, PCD, A, D, PAT, G, RSVD_12_9, EPT_R, EPT_W, EPT_X_SUPER, EPT_MEMTYPE, EPT_IGNORE_PAT,
                             EPT_LEAF, EPT_A, EPT_D, EPT_X_USER, RSVD_29_24, EPT_VGP, EPT_PW, RSVD_32, EPT_SSS, EPT_SPP,
                             RSVD_35, EPT_SVE, RSVD_62_37, NX));

/** The bit position where the EPT specific attributes begin. */
#define PGM_PTATTRS_EPT_SHIFT               PGM_PTATTRS_EPT_R_SHIFT
/** The mask of EPT bits (bits 36:ATTR_SHIFT). In the future we might choose to
 * use higher unused bits for something else, in that case adjust this mask. */
#define PGM_PTATTRS_EPT_MASK                UINT64_C(0x0000001fffffe000)

/** The mask of all PGM page attribute bits for regular page-tables. */
#define PGM_PTATTRS_PT_VALID_MASK           (  PGM_PTATTRS_R_MASK \
                                             | PGM_PTATTRS_W_MASK \
                                             | PGM_PTATTRS_US_MASK \
                                             | PGM_PTATTRS_PWT_MASK \
                                             | PGM_PTATTRS_PCD_MASK \
                                             | PGM_PTATTRS_A_MASK \
                                             | PGM_PTATTRS_D_MASK \
                                             | PGM_PTATTRS_PAT_MASK \
                                             | PGM_PTATTRS_G_MASK \
                                             | PGM_PTATTRS_NX_MASK)

/** The mask of all PGM page attribute bits for EPT. */
#define PGM_PTATTRS_EPT_VALID_MASK          (  PGM_PTATTRS_EPT_R_MASK \
                                             | PGM_PTATTRS_EPT_W_MASK \
                                             | PGM_PTATTRS_EPT_X_SUPER_MASK \
                                             | PGM_PTATTRS_EPT_MEMTYPE_MASK \
                                             | PGM_PTATTRS_EPT_IGNORE_PAT_MASK \
                                             | PGM_PTATTRS_EPT_LEAF_MASK \
                                             | PGM_PTATTRS_EPT_A_MASK \
                                             | PGM_PTATTRS_EPT_D_MASK \
                                             | PGM_PTATTRS_EPT_X_USER_MASK \
                                             | PGM_PTATTRS_EPT_VGP_MASK \
                                             | PGM_PTATTRS_EPT_PW_MASK \
                                             | PGM_PTATTRS_EPT_SSS_MASK \
                                             | PGM_PTATTRS_EPT_SPP_MASK \
                                             | PGM_PTATTRS_EPT_SVE_MASK)

/* The mask of all PGM page attribute bits (combined). */
#define PGM_PTATTRS_VALID_MASK              (PGM_PTATTRS_PT_VALID_MASK | PGM_PTATTRS_EPT_VALID_MASK)

/* Verify bits match the regular PT bits. */
AssertCompile(PGM_PTATTRS_W_SHIFT   == X86_PTE_BIT_RW);
AssertCompile(PGM_PTATTRS_US_SHIFT  == X86_PTE_BIT_US);
AssertCompile(PGM_PTATTRS_PWT_SHIFT == X86_PTE_BIT_PWT);
AssertCompile(PGM_PTATTRS_PCD_SHIFT == X86_PTE_BIT_PCD);
AssertCompile(PGM_PTATTRS_A_SHIFT   == X86_PTE_BIT_A);
AssertCompile(PGM_PTATTRS_D_SHIFT   == X86_PTE_BIT_D);
AssertCompile(PGM_PTATTRS_PAT_SHIFT == X86_PTE_BIT_PAT);
AssertCompile(PGM_PTATTRS_G_SHIFT   == X86_PTE_BIT_G);
AssertCompile(PGM_PTATTRS_W_MASK    == X86_PTE_RW);
AssertCompile(PGM_PTATTRS_US_MASK   == X86_PTE_US);
AssertCompile(PGM_PTATTRS_PWT_MASK  == X86_PTE_PWT);
AssertCompile(PGM_PTATTRS_PCD_MASK  == X86_PTE_PCD);
AssertCompile(PGM_PTATTRS_A_MASK    == X86_PTE_A);
AssertCompile(PGM_PTATTRS_D_MASK    == X86_PTE_D);
AssertCompile(PGM_PTATTRS_PAT_MASK  == X86_PTE_PAT);
AssertCompile(PGM_PTATTRS_G_MASK    == X86_PTE_G);
AssertCompile(PGM_PTATTRS_NX_MASK   == X86_PTE_PAE_NX);

/* Verify those EPT bits that must map 1:1 (after shifting). */
AssertCompile(PGM_PTATTRS_EPT_R_SHIFT          - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_READ);
AssertCompile(PGM_PTATTRS_EPT_W_SHIFT          - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_WRITE);
AssertCompile(PGM_PTATTRS_EPT_X_SUPER_SHIFT    - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_EXECUTE);
AssertCompile(PGM_PTATTRS_EPT_IGNORE_PAT_SHIFT - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_IGNORE_PAT);
AssertCompile(PGM_PTATTRS_EPT_X_USER_SHIFT     - PGM_PTATTRS_EPT_SHIFT == EPT_E_BIT_USER_EXECUTE);
/** @} */
#endif /* VBOX_VMM_TARGET_X86 || DOXYGEN_RUNNING */
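/*
 * Illustrative sketch (x86 targets only; myEptLeafToPtAttrs is a made-up
 * helper): the EPT R/W/X leaf bits land in the PGMPTATTRS EPT range via a
 * plain shift by PGM_PTATTRS_EPT_SHIFT, and EPT_R/EPT_W are additionally
 * mirrored into the generic R/W bits as described above.
 */
#if 0
static PGMPTATTRS myEptLeafToPtAttrs(uint64_t uEptEntry)
{
    uint64_t const fRWX   = RT_BIT_64(EPT_E_BIT_READ) | RT_BIT_64(EPT_E_BIT_WRITE) | RT_BIT_64(EPT_E_BIT_EXECUTE);
    PGMPTATTRS     fAttrs = (uEptEntry & fRWX) << PGM_PTATTRS_EPT_SHIFT;
    if (fAttrs & PGM_PTATTRS_EPT_R_MASK)
        fAttrs |= PGM_PTATTRS_R_MASK;
    if (fAttrs & PGM_PTATTRS_EPT_W_MASK)
        fAttrs |= PGM_PTATTRS_W_MASK;
    return fAttrs;
}
#endif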

#if defined(VBOX_VMM_TARGET_ARMV8) || defined(DOXYGEN_RUNNING)
/** @name PGM_PTATTRS_XXX - PGM page-table attributes, ARMv8 edition.
 *
 * The translation tables on ARMv8 are complicated by compressed and index
 * attributes as well as a myriad of feature dependent field interpretations.
 *
 * The stage 1 effective access attributes are placed in bits 41:32. A set of
 * leaf bits are copied raw, but NSE had to be shifted down due to nG confusion.
 *
 * The stage 2 effective access attributes are placed in bits 31:24. Bits taken
 * directly from the leaf translation table entry are shifted down 7 bits to
 * avoid collision with similar bits from the stage 1 leaf.
 *
 * @{ */

/** Stage 2, page/block: D - dirty flag. (shifted down 7) */
#define PGM_PTATTRS_S2_D_SHIFT          0
#define PGM_PTATTRS_S2_D_MASK           RT_BIT_64(PGM_PTATTRS_S2_D_SHIFT)
/** Stage 2, page/block: AF - access flag. (shifted down 7) */
#define PGM_PTATTRS_S2_AF_SHIFT         3
#define PGM_PTATTRS_S2_AF_MASK          RT_BIT_64(PGM_PTATTRS_S2_AF_SHIFT)
/** Page/block level: NS - Non-secure. */
#define PGM_PTATTRS_NS_SHIFT            5
#define PGM_PTATTRS_NS_MASK             RT_BIT_64(PGM_PTATTRS_NS_SHIFT)
/** Page/block level: NSE - Non-secure extension (?) - FEAT_RME, FEAT_SEL2.
 * @note Overlaps with nG, shifted down. */
#define PGM_PTATTRS_NSE_SHIFT           6
#define PGM_PTATTRS_NSE_MASK            RT_BIT_64(PGM_PTATTRS_NSE_SHIFT)
/** Page/block level: nD - Not dirty. */
#define PGM_PTATTRS_ND_SHIFT            7
#define PGM_PTATTRS_ND_MASK             RT_BIT_64(PGM_PTATTRS_ND_SHIFT)
/** Stage 2, page/block: nT, FEAT_BBM. Only supported with 64KB page size. */
#define PGM_PTATTRS_S2_NT_SHIFT         9
#define PGM_PTATTRS_S2_NT_MASK          RT_BIT_64(PGM_PTATTRS_S2_NT_SHIFT)
/** Combined: AF - Access flag.
 * @note The table and page/block AF attributes ANDed together. */
#define PGM_PTATTRS_AF_SHIFT            10
#define PGM_PTATTRS_AF_MASK             RT_BIT_64(PGM_PTATTRS_AF_SHIFT)
/** Page/block level: nG - Not global nG bit. */
#define PGM_PTATTRS_NG_SHIFT            11
#define PGM_PTATTRS_NG_MASK             RT_BIT_64(PGM_PTATTRS_NG_SHIFT)
/** Page/block level: nT, FEAT_BBM. Only supported with 64KB page size. */
#define PGM_PTATTRS_NT_SHIFT            16
#define PGM_PTATTRS_NT_MASK             RT_BIT_64(PGM_PTATTRS_NT_SHIFT)

/** Stage 2: Read access. */
#define PGM_PTATTRS_S2_R_SHIFT          24
#define PGM_PTATTRS_S2_R_MASK           RT_BIT_64(PGM_PTATTRS_S2_R_SHIFT)
/** Stage 2: Full write access. */
#define PGM_PTATTRS_S2_W_SHIFT          25
#define PGM_PTATTRS_S2_W_MASK           RT_BIT_64(PGM_PTATTRS_S2_W_SHIFT)
/** Stage 2: Privileged execution access. */
#define PGM_PTATTRS_S2_PX_SHIFT         26
#define PGM_PTATTRS_S2_PX_MASK          RT_BIT_64(PGM_PTATTRS_S2_PX_SHIFT)
/** Stage 2: Unprivileged execution access. */
#define PGM_PTATTRS_S2_UX_SHIFT         27
#define PGM_PTATTRS_S2_UX_MASK          RT_BIT_64(PGM_PTATTRS_S2_UX_SHIFT)
/** Stage 2: Limited write access - only MMU and RCW. */
#define PGM_PTATTRS_S2_W_LIM_SHIFT      28
#define PGM_PTATTRS_S2_W_LIM_MASK       RT_BIT_64(PGM_PTATTRS_S2_W_LIM_SHIFT)
/** Stage 2: TopLevel0 - only used with PGM_PTATTRS_S2_W_LIM_MASK. */
#define PGM_PTATTRS_S2_TL0_SHIFT        29
#define PGM_PTATTRS_S2_TL0_MASK         RT_BIT_64(PGM_PTATTRS_S2_TL0_SHIFT)
/** Stage 2: TopLevel1 - only used with PGM_PTATTRS_S2_W_LIM_MASK. */
#define PGM_PTATTRS_S2_TL1_SHIFT        30
#define PGM_PTATTRS_S2_TL1_MASK         RT_BIT_64(PGM_PTATTRS_S2_TL1_SHIFT)
/** Stage 2: Device memory type. */
#define PGM_PTATTRS_S2_DEVICE_SHIFT     31
#define PGM_PTATTRS_S2_DEVICE_MASK      RT_BIT_64(PGM_PTATTRS_S2_DEVICE_SHIFT)

/** Stage 1: Privileged read access. */
#define PGM_PTATTRS_PR_SHIFT            32
#define PGM_PTATTRS_PR_MASK             RT_BIT_64(PGM_PTATTRS_PR_SHIFT)
/** Stage 1: Privileged write access. */
#define PGM_PTATTRS_PW_SHIFT            33
#define PGM_PTATTRS_PW_MASK             RT_BIT_64(PGM_PTATTRS_PW_SHIFT)
/** Stage 1: Privileged execute access. */
#define PGM_PTATTRS_PX_SHIFT            34
#define PGM_PTATTRS_PX_MASK             RT_BIT_64(PGM_PTATTRS_PX_SHIFT)
/** Stage 1: Privileged guarded control stack (GCS) access. */
#define PGM_PTATTRS_PGCS_SHIFT          35
#define PGM_PTATTRS_PGCS_MASK           RT_BIT_64(PGM_PTATTRS_PGCS_SHIFT)
/** Stage 1: Unprivileged read access. */
#define PGM_PTATTRS_UR_SHIFT            36
#define PGM_PTATTRS_UR_MASK             RT_BIT_64(PGM_PTATTRS_UR_SHIFT)
/** Stage 1: Unprivileged write access. */
#define PGM_PTATTRS_UW_SHIFT            37
#define PGM_PTATTRS_UW_MASK             RT_BIT_64(PGM_PTATTRS_UW_SHIFT)
/** Stage 1: Unprivileged execute access. */
#define PGM_PTATTRS_UX_SHIFT            38
#define PGM_PTATTRS_UX_MASK             RT_BIT_64(PGM_PTATTRS_UX_SHIFT)
/** Stage 1: Unprivileged guarded control stack (GCS) access. */
#define PGM_PTATTRS_UGCS_SHIFT          39
#define PGM_PTATTRS_UGCS_MASK           RT_BIT_64(PGM_PTATTRS_UGCS_SHIFT)
/** Stage 1: Privileged write-implies-no-execute access.
 * @todo not sure if we need to expose this bit. */
#define PGM_PTATTRS_PWXN_SHIFT          40
#define PGM_PTATTRS_PWXN_MASK           RT_BIT_64(PGM_PTATTRS_PWXN_SHIFT)
/** Stage 1: Unprivileged write-implies-no-execute access.
 * @todo not sure if we need to expose this bit. */
#define PGM_PTATTRS_UWXN_SHIFT          41
#define PGM_PTATTRS_UWXN_MASK           RT_BIT_64(PGM_PTATTRS_UWXN_SHIFT)
/** Stage 1: Device memory type. */
#define PGM_PTATTRS_DEVICE_SHIFT        42
#define PGM_PTATTRS_DEVICE_MASK         RT_BIT_64(PGM_PTATTRS_DEVICE_SHIFT)

/** Page/block level: Guarded page. */
#define PGM_PTATTRS_GP_SHIFT            50
#define PGM_PTATTRS_GP_MASK             RT_BIT_64(PGM_PTATTRS_GP_SHIFT)
/** Stage 2, page/block: AssuredOnly. (shifted down 7 bits) */
#define PGM_PTATTRS_S2_AO_SHIFT         51
#define PGM_PTATTRS_S2_AO_MASK          RT_BIT_64(PGM_PTATTRS_S2_AO_SHIFT)
/** Stage 2, page/block: Alternate MECID (encryption related). (shifted down
 * 7 bits) */
#define PGM_PTATTRS_S2_AMEC_SHIFT       56
#define PGM_PTATTRS_S2_AMEC_MASK        RT_BIT_64(PGM_PTATTRS_S2_AMEC_SHIFT)
/** Page/block level: Alternate MECID (encryption related). */
#define PGM_PTATTRS_AMEC_SHIFT          63
#define PGM_PTATTRS_AMEC_MASK           RT_BIT_64(PGM_PTATTRS_AMEC_SHIFT)

/** Stage 1 page/block level bits that are copied raw. */
#define PGM_PTATTRS_S1_LEAF_MASK        (  PGM_PTATTRS_NS_MASK \
                                         /*| PGM_PTATTRS_NSE_MASK shifted */ \
                                         | PGM_PTATTRS_AF_MASK \
                                         | PGM_PTATTRS_NG_MASK \
                                         | PGM_PTATTRS_NT_MASK \
                                         | PGM_PTATTRS_GP_MASK \
                                         | PGM_PTATTRS_AMEC_MASK )

/** Stage 2 page/block level entry shift down count. */
#define PGM_PTATTRS_S2_LEAF_SHIFT       7
/** Stage 2 page/block level entry mask of shifted down bits copied. */
#define PGM_PTATTRS_S2_LEAF_MASK        (  PGM_PTATTRS_S2_D_MASK \
                                         | PGM_PTATTRS_S2_AF_MASK \
                                         | PGM_PTATTRS_S2_NT_MASK \
                                         | PGM_PTATTRS_S2_AO_MASK \
                                         | PGM_PTATTRS_S2_AMEC_MASK )

/** @} */
#endif /* VBOX_VMM_TARGET_ARMV8 || DOXYGEN_RUNNING */



/**
 * Page table walk information.
 *
 * This provides extensive information regarding page faults (or EPT
 * violations/misconfigurations) while traversing page tables.
 */
typedef struct PGMPTWALK
{
    /** The linear address that is being resolved (input). */
    RTGCPTR         GCPtr;

    /** The second-level (/ stage 2) physical address (input/output).
     * @remarks only valid if fIsSlat is set. */
    RTGCPHYS        GCPhysNested;

    /** The physical address that is the result of the walk (output). */
    RTGCPHYS        GCPhys;

    /** Set if the walk succeeded. */
    bool            fSucceeded;
    /** Whether this is a second-level (/ stage 2) address translation. */
    bool            fIsSlat;
    /** Whether the linear address (GCPtr) caused the second-level
     * address translation. */
    bool            fIsLinearAddrValid;
    /** The level the problem arose at.
     * @x86   PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4,
     *        CR3 is level 8. This is 0 on success.
     * @arm64 TBD.
     * @todo Check if anyone is using this and unify it between the platforms. */
    uint8_t         uLevel;
    /** Set if the page isn't present. */
    bool            fNotPresent;
    /** Encountered a bad physical address. */
    bool            fBadPhysAddr;
    /** Set if there were reserved bit violations. */
    bool            fRsvdError;
    /** Set if it involves a big page (2/4 MB). */
    bool            fBigPage;
    /** Set if it involves a gigantic page (X86: 1 GB; ARM: ). */
    bool            fGigantPage;
    bool            afPadding[3];
    /** Page-walk failure type, PGM_WALKFAIL_XXX. */
    PGMWALKFAIL     fFailed;

    /** The effective page-table attributes, PGM_PTATTRS_XXX. */
    PGMPTATTRS      fEffective;
} PGMPTWALK;
/** Pointer to page walk information. */
typedef PGMPTWALK *PPGMPTWALK;
/** Pointer to const page walk information. */
typedef PGMPTWALK const *PCPGMPTWALK;

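/*
 * Illustrative sketch (myQueryGuestPage is a made-up helper): translating a
 * guest-linear address with PGMGstGetPage, which is declared further down.
 */
#if 0
static int myQueryGuestPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc) && Walk.fSucceeded)
    {
        *pGCPhys = Walk.GCPhys;     /* result; Walk.fEffective carries PGM_PTATTRS_XXX */
        return VINF_SUCCESS;
    }
    /* Walk.fFailed (PGM_WALKFAIL_XXX) and Walk.uLevel describe the failure. */
    return rc;
}
#endif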

/** @name PGM_WALKINFO_XXX - flag based PGM page table walk info.
 * @{ */
/** Set if the walk succeeded. */
#define PGM_WALKINFO_SUCCEEDED              RT_BIT_32(0)
/** Whether this is a second-level (/ stage 2) address translation. */
#define PGM_WALKINFO_IS_SLAT                RT_BIT_32(1)

/** Set if it involves a big page.
 * @x86   2MB (PAE+LM), 4MB (legacy).
 * @arm64 Level 2 block - 2MB (gr=4KB v8), 32MB (gr=16KB v8), 512MB (gr=64KB
 *        v8), 1MB (gr=4K v9), 16MB (gr=16KB v9), 256MB (gr=64KB v9). */
#define PGM_WALKINFO_BIG_PAGE               RT_BIT_32(7)
/** Set if it involves a gigantic page.
 * @x86   1 GB.
 * @arm64 Level 1 block - 1GB (gr=4KB v8), 256MB (gr=4KB v9), 16GB (gr=16KB
 *        v9), 1TB (gr=64KB v9). */
#define PGM_WALKINFO_GIGANTIC_PAGE          RT_BIT_32(8)

/** @todo Add a level 0 block flag for ARM/VMSAv9. */

/** Whether the linear address (GCPtr) caused the second-level
 * address translation - read the code to figure this one.
 * @todo for PGMPTWALKFAST::fFailed? */
#define PGM_WALKINFO_IS_LINEAR_ADDR_VALID   RT_BIT_32(10)
/** @} */

/**
 * Fast page table walk information.
 *
 * This is a slimmed down version of PGMPTWALK for use by IEM.
 */
typedef struct PGMPTWALKFAST
{
    /** The linear address that is being resolved (input). */
    RTGCPTR         GCPtr;

    /** The physical address that is the result of the walk (output).
     * This includes the offset mask from the GCPtr input value. */
    RTGCPHYS        GCPhys;

    /** The second-level physical address (input/output).
     * @remarks only valid if fIsSlat is set. */
    RTGCPHYS        GCPhysNested;

    /** Walk information PGM_WALKINFO_XXX (output). */
    uint32_t        fInfo;
    /** Page-walk failure type, PGM_WALKFAIL_XXX (output). */
    PGMWALKFAIL     fFailed;

    /** The effective page-table attributes, PGM_PTATTRS_XXX (output). */
    PGMPTATTRS      fEffective;
} PGMPTWALKFAST;
/** Pointer to fast page walk information. */
typedef PGMPTWALKFAST *PPGMPTWALKFAST;
/** Pointer to const fast page walk information. */
typedef PGMPTWALKFAST const *PCPGMPTWALKFAST;

#define PGMPTWALKFAST_ZERO(a_pWalkFast) do { \
        (a_pWalkFast)->GCPtr        = 0; \
        (a_pWalkFast)->GCPhys       = 0; \
        (a_pWalkFast)->GCPhysNested = 0; \
        (a_pWalkFast)->fInfo        = 0; \
        (a_pWalkFast)->fFailed      = 0; \
        (a_pWalkFast)->fEffective   = 0; \
    } while (0)
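
/*
 * Illustrative sketch (myQueryGuestPageFast is a made-up helper): the fast
 * walk as IEM would issue it for an unprivileged read. PGMGstQueryPageFast
 * and the PGMQPAGE_F_XXX flags are declared further down.
 */
#if 0
static int myQueryGuestPageFast(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    PGMPTWALKFAST WalkFast;
    PGMPTWALKFAST_ZERO(&WalkFast);
    int rc = PGMGstQueryPageFast(pVCpu, GCPtr, PGMQPAGE_F_READ | PGMQPAGE_F_USER_MODE, &WalkFast);
    if (rc == VINF_SUCCESS && (WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED))
    {
        *pGCPhys = WalkFast.GCPhys;  /* already includes the page offset of GCPtr */
        return VINF_SUCCESS;
    }
    return rc;                       /* WalkFast.fFailed has the PGM_WALKFAIL_XXX details */
}
#endif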


#ifndef VBOX_VMM_TARGET_ARMV8
/** Macro for checking if the guest is using paging.
 * @param   enmMode     PGMMODE_*.
 * @remark  ASSUMES certain order of the PGMMODE_* values.
 */
# define PGMMODE_WITH_PAGING(enmMode)   ((enmMode) >= PGMMODE_32_BIT)

/** Macro for checking if it's one of the long mode modes.
 * @param   enmMode     PGMMODE_*.
 */
# define PGMMODE_IS_64BIT_MODE(enmMode) ((enmMode) == PGMMODE_AMD64_NX || (enmMode) == PGMMODE_AMD64)

/** Macro for checking if it's one of the nested paging modes.
 * @param   enmMode     PGMMODE_*.
 */
# define PGMMODE_IS_NESTED(enmMode)     (   (enmMode) == PGMMODE_NESTED_32BIT \
                                         || (enmMode) == PGMMODE_NESTED_PAE \
                                         || (enmMode) == PGMMODE_NESTED_AMD64)

/** Macro for checking if it's one of the PAE modes.
 * @param   enmMode     PGMMODE_*.
 */
# define PGMMODE_IS_PAE(enmMode)        (   (enmMode) == PGMMODE_PAE \
                                         || (enmMode) == PGMMODE_PAE_NX)
#else
/** Macro for checking if the guest is using paging.
 * @param   enmMode     PGMMODE_*.
 * @remark  ASSUMES certain order of the PGMMODE_* values.
 */
# define PGMMODE_WITH_PAGING(enmMode)   ((enmMode) > PGMMODE_NONE)

/** Macro for checking if it's the 64-bit translation mode.
 * @param   enmMode     PGMMODE_*.
 */
# define PGMMODE_IS_64BIT_MODE(enmMode) ((enmMode) == PGMMODE_VMSA_V8_64)
#endif
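
/*
 * Illustrative sketch: the mode predicates are typically applied to the value
 * returned by PGMGetGuestMode() (declared further down).
 */
#if 0
static bool myGuestIsIn64BitPagedMode(PVMCPU pVCpu)
{
    PGMMODE const enmMode = PGMGetGuestMode(pVCpu);
    return PGMMODE_WITH_PAGING(enmMode) && PGMMODE_IS_64BIT_MODE(enmMode);
}
#endif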


/**
 * Is the ROM mapped (true) or is the shadow RAM mapped (false).
 *
 * @returns boolean.
 * @param   enmProt     The PGMROMPROT value, must be valid.
 */
#define PGMROMPROT_IS_ROM(enmProt) \
    (   (enmProt) == PGMROMPROT_READ_ROM_WRITE_IGNORE \
     || (enmProt) == PGMROMPROT_READ_ROM_WRITE_RAM )


VMMDECL(bool)           PGMIsLockOwner(PVMCC pVM);

VMMDECL(int)            PGMRegisterStringFormatTypes(void);
VMMDECL(void)           PGMDeregisterStringFormatTypes(void);
VMMDECL(RTHCPHYS)       PGMGetHyperCR3(PVMCPU pVCpu);
VMMDECL(int)            PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPTR pvFault);
VMMDECL(int)            PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
VMMDECL(VBOXSTRICTRC)   PGMInterpretInstruction(PVMCPUCC pVCpu, RTGCPTR pvFault);
VMMDECL(int)            PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
VMMDECL(int)            PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags);
VMMDECL(int)            PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags);
VMMDECL(int)            PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags);
/** @name Flags for PGMShwMakePageReadonly, PGMShwMakePageWritable and
 *        PGMShwMakePageNotPresent
 * @{ */
/** The call is from an access handler for dealing with a faulting write
 *  operation. The virtual address is within the same page. */
#define PGM_MK_PG_IS_WRITE_FAULT    RT_BIT(0)
/** The page is an MMIO2. */
#define PGM_MK_PG_IS_MMIO2          RT_BIT(1)
/** @} */
VMMDECL(int)            PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
/** @name PGMQPAGE_F_XXX - Flags for PGMGstQueryPageFast
 * @{ */
/** Querying for read access, set A bits accordingly. */
#define PGMQPAGE_F_READ         RT_BIT_32(0)
/** Querying for write access, set A bits and D bit accordingly.
 * Don't set leaf entry bits if the page is read-only. */
#define PGMQPAGE_F_WRITE        RT_BIT_32(1)
/** Querying for execute access, set A bits accordingly. */
#define PGMQPAGE_F_EXECUTE      RT_BIT_32(2)
/** The query is for a user mode access, so don't set leaf A or D bits
 * unless the effective access allows usermode access.
 * Assume supervisor (privileged) access when not set. */
#define PGMQPAGE_F_USER_MODE    RT_BIT_32(3)
#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
/** X86: Treat CR0.WP as zero when evaluating the access.
 * @note Same value as X86_CR0_WP. */
# define PGMQPAGE_F_CR0_WP0     RT_BIT_32(16)
#endif
/** @todo ARM: security, s2, GCS, ++ */
/** The valid flag mask. */
#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
# define PGMQPAGE_F_VALID_MASK  UINT32_C(0x0001000f)
#else
# define PGMQPAGE_F_VALID_MASK  UINT32_C(0x0000000f)
#endif
/** @} */
VMM_INT_DECL(int)       PGMGstQueryPageFast(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalkFast);
VMMDECL(int)            PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
VMM_INT_DECL(bool)      PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes);
VMM_INT_DECL(int)       PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes);
VMM_INT_DECL(int)       PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3);

VMMDECL(int)            PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
VMM_INT_DECL(int)       PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal);
VMM_INT_DECL(int)       PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
VMM_INT_DECL(int)       PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3);
VMM_INT_DECL(int)       PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce);
VMM_INT_DECL(int)       PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce);
#elif defined(VBOX_VMM_TARGET_ARMV8)
VMM_INT_DECL(int)       PGMChangeMode(PVMCPUCC pVCpu, uint8_t bEl, uint64_t u64RegSctlr, uint64_t u64RegTcr);
#endif
VMMDECL(void)           PGMCr0WpEnabled(PVMCPUCC pVCpu);
VMMDECL(PGMMODE)        PGMGetGuestMode(PVMCPU pVCpu);
VMMDECL(PGMMODE)        PGMGetShadowMode(PVMCPU pVCpu);
VMMDECL(PGMMODE)        PGMGetHostMode(PVM pVM);
VMMDECL(const char *)   PGMGetModeName(PGMMODE enmMode);
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode);
#endif
VMM_INT_DECL(void)      PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe);
VMMDECL(bool)           PGMHasDirtyPages(PVM pVM);
VMM_INT_DECL(void)      PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr);

/** PGM physical access handler type registration handle (heap offset, valid
 * across contexts without needing fixing up). The callbacks and handler kind
 * are associated with this handle and shared by all handler registrations
 * using it. */
typedef uint64_t PGMPHYSHANDLERTYPE;
/** Pointer to a PGM physical handler type registration handle. */
typedef PGMPHYSHANDLERTYPE *PPGMPHYSHANDLERTYPE;
/** NIL value for PGM physical access handler type handle. */
#define NIL_PGMPHYSHANDLERTYPE  UINT64_MAX
VMMDECL(int)  PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
                                         uint64_t uUser, R3PTRTYPE(const char *) pszDesc);
VMMDECL(int)  PGMHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, RTGCPHYS GCPhys, PGMPHYSHANDLERTYPE hType);
VMMDECL(int)  PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast);
VMMDECL(int)  PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys);
VMMDECL(int)  PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser);
VMMDECL(int)  PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit);
VMMDECL(int)  PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2);
VMMDECL(int)  PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage);
VMMDECL(int)  PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
                                               PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMMio2PageRemap);
VMMDECL(int)  PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap);
VMMDECL(int)  PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys);
VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys);

/** @name PGMPHYSHANDLER_F_XXX - flags for PGMR3HandlerPhysicalTypeRegister and PGMR0HandlerPhysicalTypeRegister
 * @{ */
/** Whether to hold the PGM lock while calling the handler or not.
 *  Mainly an optimization for PGM callers. */
#define PGMPHYSHANDLER_F_KEEP_PGM_LOCK  RT_BIT_32(0)
/** The uUser value is a ring-0 device instance index that needs translating
 * into a PDMDEVINS pointer before calling the handler. This is a hack to make
 * it possible to use access handlers in devices. */
#define PGMPHYSHANDLER_F_R0_DEVINS_IDX  RT_BIT_32(1)
/** Don't apply the access handler to VT-x and AMD-V. Only works with full pages.
 * This is a trick for the VT-x APIC access page in nested VT-x setups. */
#define PGMPHYSHANDLER_F_NOT_IN_HM      RT_BIT_32(2)
/** Mask of valid bits. */
#define PGMPHYSHANDLER_F_VALID_MASK     UINT32_C(7)
/** @} */
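
/*
 * Illustrative sketch (myMonitorPage is a made-up helper): registering a
 * handler over a page-aligned range and tearing it down again. hMyType would
 * come from PGMR3HandlerPhysicalTypeRegister (or the ring-0 setup API below).
 */
#if 0
static int myMonitorPage(PVMCC pVM, RTGCPHYS GCPhys, PGMPHYSHANDLERTYPE hMyType)
{
    int rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhys + GUEST_PAGE_SIZE - 1,
                                        hMyType, /*uUser=*/0, "My monitored page");
    if (RT_SUCCESS(rc))
    {
        /* ... the type's handler now fires on guest access to the page ... */
        rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
    }
    return rc;
}
#endif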


/**
 * Page type.
 *
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks This is used in the saved state, so changes to it requires bumping
 *          the saved state version.
 * @todo    So, convert to \#defines!
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Special page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAliasHC(), but this is generally only used for
     * VT-x's APIC access page at the moment. Treated as MMIO by everyone except
     * the shadow paging code. */
    PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END == 8);

/** @name PGM page type predicates.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(a_enmType)  ( (a_enmType) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(a_enmType) ( (a_enmType) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(a_enmType)       ( (a_enmType) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(a_enmType)       ( (a_enmType) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(a_enmType)        ( (a_enmType) == PGMPAGETYPE_MMIO )
/** @} */

/**
 * A physical memory range.
 *
 * @note This layout adheres to the GIM Hyper-V specs (asserted while compiling
 *       GIM Hyper-V that uses the PGM API).
 */
typedef struct PGMPHYSRANGE
{
    /** The first address in the range. */
    RTGCPHYS    GCPhysStart;
    /** The number of pages in the range. */
    uint64_t    cPages;
} PGMPHYSRANGE;
AssertCompileSize(PGMPHYSRANGE, 16);

/**
 * A list of physical memory ranges.
 *
 * @note This layout adheres to the GIM Hyper-V specs (asserted while compiling
 *       GIM Hyper-V that uses the PGM API).
 */
typedef struct PGMPHYSRANGES
{
    /** The number of ranges in the list. */
    uint64_t        cRanges;
    /** Array of physical memory ranges. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    PGMPHYSRANGE    aRanges[RT_FLEXIBLE_ARRAY];
} PGMPHYSRANGES;
/** Pointer to a list of physical memory ranges. */
typedef PGMPHYSRANGES *PPGMPHYSRANGES;
/** Pointer to a const list of physical memory ranges. */
typedef PGMPHYSRANGES const *PCPGMPHYSRANGES;

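/*
 * Illustrative sketch: PGMPHYSRANGES ends in a flexible array member, so a
 * list for N ranges is sized with RT_UOFFSETOF_DYN (RTMemAllocZ comes from
 * iprt/mem.h, which this header does not include).
 */
#if 0
static PPGMPHYSRANGES myAllocRangeList(uint32_t cRanges)
{
    PPGMPHYSRANGES pRanges = (PPGMPHYSRANGES)RTMemAllocZ(RT_UOFFSETOF_DYN(PGMPHYSRANGES, aRanges[cRanges]));
    if (pRanges)
        pRanges->cRanges = cRanges;  /* caller fills in aRanges[0 .. cRanges-1] */
    return pRanges;
}
#endif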

VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys);

VMM_INT_DECL(int)       PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys);
VMM_INT_DECL(int)       PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys);
VMM_INT_DECL(int)       PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
VMM_INT_DECL(int)       PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock);
VMM_INT_DECL(int)       PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock);
VMM_INT_DECL(int)       PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock);

VMMDECL(bool)           PGMPhysIsA20Enabled(PVMCPU pVCpu);
VMMDECL(bool)           PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys);
VMMDECL(bool)           PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys);
VMMDECL(int)            PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys);
VMMDECL(void)           PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock);
VMMDECL(void)           PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLock);

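/*
 * Illustrative sketch (myPeekGuestPage is a made-up helper): mapping a guest
 * page read-only and releasing the mapping lock again. Assumes a page-aligned
 * GCPhys; memcpy needs string.h.
 */
#if 0
static int myPeekGuestPage(PVMCC pVM, RTGCPHYS GCPhys, uint8_t *pbDst)
{
    void const    *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pbDst, pv, GUEST_PAGE_SIZE);
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* the lock must always be released */
    }
    return rc;
}
#endif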
/** @def PGM_PHYS_RW_IS_SUCCESS
 * Check whether a PGMPhysRead, PGMPhysWrite, PGMPhysReadGCPtr or
 * PGMPhysWriteGCPtr call completed the given task.
 *
 * @returns true if completed, false if not.
 * @param   a_rcStrict      The status code.
 * @sa      IOM_SUCCESS
 */
#ifdef IN_RING3
# define PGM_PHYS_RW_IS_SUCCESS(a_rcStrict) \
    (   (a_rcStrict) == VINF_SUCCESS \
     || (a_rcStrict) == VINF_EM_DBG_STOP \
     || (a_rcStrict) == VINF_EM_DBG_EVENT \
     || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
    )
#elif defined(IN_RING0)
# define PGM_PHYS_RW_IS_SUCCESS(a_rcStrict) \
    (   (a_rcStrict) == VINF_SUCCESS \
     || (a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE \
     || (a_rcStrict) == VINF_EM_OFF \
     || (a_rcStrict) == VINF_EM_SUSPEND \
     || (a_rcStrict) == VINF_EM_RESET \
     || (a_rcStrict) == VINF_EM_HALT \
     || (a_rcStrict) == VINF_EM_DBG_STOP \
     || (a_rcStrict) == VINF_EM_DBG_EVENT \
     || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
    )
#elif defined(IN_RC)
# define PGM_PHYS_RW_IS_SUCCESS(a_rcStrict) \
    (   (a_rcStrict) == VINF_SUCCESS \
     || (a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE \
     || (a_rcStrict) == VINF_EM_OFF \
     || (a_rcStrict) == VINF_EM_SUSPEND \
     || (a_rcStrict) == VINF_EM_RESET \
     || (a_rcStrict) == VINF_EM_HALT \
     || (a_rcStrict) == VINF_SELM_SYNC_GDT \
     || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT \
     || (a_rcStrict) == VINF_EM_DBG_STOP \
     || (a_rcStrict) == VINF_EM_DBG_EVENT \
     || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
    )
#endif
/** @def PGM_PHYS_RW_DO_UPDATE_STRICT_RC
 * Updates the return code with a new result.
 *
 * Both status codes must be successes according to PGM_PHYS_RW_IS_SUCCESS.
 *
 * @param   a_rcStrict      The current return code, to be updated.
 * @param   a_rcStrict2     The new return code to merge in.
 */
#ifdef IN_RING3
# define PGM_PHYS_RW_DO_UPDATE_STRICT_RC(a_rcStrict, a_rcStrict2) \
    do { \
        Assert((a_rcStrict) == VINF_SUCCESS); \
        Assert((a_rcStrict2) == VINF_SUCCESS); \
    } while (0)
#elif defined(IN_RING0)
# define PGM_PHYS_RW_DO_UPDATE_STRICT_RC(a_rcStrict, a_rcStrict2) \
    do { \
        Assert(PGM_PHYS_RW_IS_SUCCESS(a_rcStrict)); \
        Assert(PGM_PHYS_RW_IS_SUCCESS(a_rcStrict2)); \
        AssertCompile(VINF_IOM_R3_MMIO_COMMIT_WRITE > VINF_EM_LAST); \
        if ((a_rcStrict2) == VINF_SUCCESS || (a_rcStrict) == (a_rcStrict2)) \
        { /* likely */ } \
        else if (   (a_rcStrict) == VINF_SUCCESS \
                 || (a_rcStrict) > (a_rcStrict2)) \
            (a_rcStrict) = (a_rcStrict2); \
    } while (0)
#elif defined(IN_RC)
# define PGM_PHYS_RW_DO_UPDATE_STRICT_RC(a_rcStrict, a_rcStrict2) \
    do { \
        Assert(PGM_PHYS_RW_IS_SUCCESS(a_rcStrict)); \
        Assert(PGM_PHYS_RW_IS_SUCCESS(a_rcStrict2)); \
        AssertCompile(VINF_SELM_SYNC_GDT > VINF_EM_LAST); \
        AssertCompile(VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT > VINF_EM_LAST); \
        AssertCompile(VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT < VINF_SELM_SYNC_GDT); \
        AssertCompile(VINF_IOM_R3_MMIO_COMMIT_WRITE > VINF_EM_LAST); \
        AssertCompile(VINF_IOM_R3_MMIO_COMMIT_WRITE > VINF_SELM_SYNC_GDT); \
        AssertCompile(VINF_IOM_R3_MMIO_COMMIT_WRITE > VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT); \
        if ((a_rcStrict2) == VINF_SUCCESS || (a_rcStrict) == (a_rcStrict2)) \
        { /* likely */ } \
        else if ((a_rcStrict) == VINF_SUCCESS) \
            (a_rcStrict) = (a_rcStrict2); \
        else if (   (   (a_rcStrict) > (a_rcStrict2) \
                     && (   (a_rcStrict2) <= VINF_EM_RESET \
                         || (a_rcStrict) != VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT) ) \
                 || (   (a_rcStrict2) == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT \
                     && (a_rcStrict) > VINF_EM_RESET) ) \
            (a_rcStrict) = (a_rcStrict2); \
    } while (0)
#endif

VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin);
VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin);
VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin);
VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin);

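/*
 * Illustrative sketch (myReadGuestPhys is a made-up helper): any status that
 * satisfies PGM_PHYS_RW_IS_SUCCESS means the data was transferred, but an
 * informational status (e.g. VINF_EM_DBG_STOP) must still be propagated.
 */
#if 0
static VBOXSTRICTRC myReadGuestPhys(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvBuf, cbRead, PGMACCESSORIGIN_DEVICE);
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    {
        /* pvBuf now holds the data; rcStrict may still be informational. */
    }
    return rcStrict;
}
#endif
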
1197VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb);
1198VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb);
1199VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
1200VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
1201VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
1202
1203VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers, void **ppv, PPGMPAGEMAPLOCK pLock);
1204VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers);
1205VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
1206 R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb);
1207/** @name Flags returned by PGMPhysIemGCPhys2PtrNoLock
1208 * @{ */
1209/** @def PGMIEMGCPHYS2PTR_F_NO_WRITE
1210 * Not writable (IEMTLBE_F_PG_NO_WRITE). */
1211/** @def PGMIEMGCPHYS2PTR_F_NO_READ
1212 * Not readable (IEMTLBE_F_PG_NO_READ). */
1213/** @def PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3
1214 * No ring-3 mapping (IEMTLBE_F_NO_MAPPINGR3). */
1215/** @def PGMIEMGCPHYS2PTR_F_UNASSIGNED
1216 * Unassigned memory (IEMTLBE_F_PG_UNASSIGNED). */
1217/** @def PGMIEMGCPHYS2PTR_F_CODE_PAGE
1218 * Write monitored IEM code page (IEMTLBE_F_PG_CODE_PAGE). */
1219#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1220# define PGMIEMGCPHYS2PTR_F_NO_WRITE RT_BIT_32(3)
1221# define PGMIEMGCPHYS2PTR_F_NO_READ RT_BIT_32(4)
1222# define PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 RT_BIT_32(8)
1223# define PGMIEMGCPHYS2PTR_F_UNASSIGNED RT_BIT_32(9)
1224# define PGMIEMGCPHYS2PTR_F_CODE_PAGE RT_BIT_32(10)
1225#elif defined(VBOX_VMM_TARGET_ARMV8)
1226# define PGMIEMGCPHYS2PTR_F_NO_READ RT_BIT_32(13)
1227# define PGMIEMGCPHYS2PTR_F_NO_WRITE RT_BIT_32(14)
1228# define PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 RT_BIT_32(15)
1229# define PGMIEMGCPHYS2PTR_F_UNASSIGNED RT_BIT_32(16)
1230# define PGMIEMGCPHYS2PTR_F_CODE_PAGE RT_BIT_32(17)
1231#endif
1232/** @} */
1233
1234/** Information returned by PGMPhysNemQueryPageInfo. */
1235typedef struct PGMPHYSNEMPAGEINFO
1236{
1237 /** The host physical address of the page, NIL_HCPHYS if invalid page. */
1238 RTHCPHYS HCPhys;
1239 /** The NEM access mode for the page, NEM_PAGE_PROT_XXX */
1240 uint32_t fNemProt : 8;
1241 /** The NEM state associated with the PAGE. */
1242 uint32_t u2NemState : 2;
1243 /** The NEM state associated with the PAGE before pgmPhysPageMakeWritable was called. */
1244 uint32_t u2OldNemState : 2;
1245 /** Set if the page has handler. */
1246 uint32_t fHasHandlers : 1;
1247 /** Set if is the zero page backing it. */
1248 uint32_t fZeroPage : 1;
1249 /** Set if the page has handler. */
1250 PGMPAGETYPE enmType;
1251} PGMPHYSNEMPAGEINFO;
1252/** Pointer to page information for NEM. */
1253typedef PGMPHYSNEMPAGEINFO *PPGMPHYSNEMPAGEINFO;
1254/**
1255 * Callback for checking that the page is in sync while under the PGM lock.
1256 *
1257 * NEM passes this callback to PGMPhysNemQueryPageInfo to check that the page is
1258 * in-sync between PGM and the native hypervisor API in an atomic fashion.
1259 *
1260 * @returns VBox status code.
1261 * @param pVM The cross context VM structure.
1262 * @param pVCpu The cross context per virtual CPU structure. Optional,
1263 * see PGMPhysNemQueryPageInfo.
1264 * @param GCPhys The guest physical address (not A20 masked).
1265 * @param pInfo The page info structure. This function updates the
1266 * u2NemState memory and the caller will update the PGMPAGE
1267 * copy accordingly.
1268 * @param pvUser Callback user argument.
1269 */
1270typedef DECLCALLBACKTYPE(int, FNPGMPHYSNEMCHECKPAGE,(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser));
1271/** Pointer to a FNPGMPHYSNEMCHECKPAGE function. */
1272typedef FNPGMPHYSNEMCHECKPAGE *PFNPGMPHYSNEMCHECKPAGE;
1273
1274VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable,
1275 PPGMPHYSNEMPAGEINFO pInfo, PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser);
1276
1277/**
1278 * Callback for use with PGMPhysNemEnumPagesByState.
1279 * @returns VBox status code.
1280 * Failure status will stop enumeration immediately and return.
1281 * @param pVM The cross context VM structure.
1282 * @param pVCpu The cross context per virtual CPU structure. Optional,
1283 * see PGMPhysNemEnumPagesByState.
1284 * @param GCPhys The guest physical address (not A20 masked).
1285 * @param pu2NemState Pointer to variable with the NEM state. This can be
1286 * update.
1287 * @param pvUser The user argument.
1288 */
1289typedef DECLCALLBACKTYPE(int, FNPGMPHYSNEMENUMCALLBACK,(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
1290 uint8_t *pu2NemState, void *pvUser));
1291/** Pointer to a FNPGMPHYSNEMENUMCALLBACK function. */
1292typedef FNPGMPHYSNEMENUMCALLBACK *PFNPGMPHYSNEMENUMCALLBACK;
1293VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC VCpu, uint8_t uMinState,
1294 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser);


#ifdef VBOX_STRICT
VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM);
VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM);
VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4);
#endif /* VBOX_STRICT */

VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages);

/**
 * Query the large page usage state.
 *
 * @returns true if large pages are enabled, false if disabled.
 * @param   pVM     The cross context VM structure.
 */
#define PGMIsUsingLargePages(pVM)   ((pVM)->pgm.s.fUseLargePages)
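
/** @par Example
 * A hedged usage sketch combining PGMSetLargePageUsage with the query macro
 * above (the log statement is illustrative):
 * @code
 * // int rc = PGMSetLargePageUsage(pVM, true /*fUseLargePages*/);
 * // if (RT_SUCCESS(rc) && PGMIsUsingLargePages(pVM))
 * //     LogRel(("PGM: large page allocation is enabled\n"));
 * @endcode
 */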


/** @defgroup grp_pgm_r0 The PGM Host Context Ring-0 API
 * @{
 */
#ifdef IN_RING0
VMMR0_INT_DECL(int)  PGMR0InitPerVMData(PGVM pGVM, RTR0MEMOBJ hMemObj);
VMMR0_INT_DECL(int)  PGMR0InitVM(PGVM pGVM);
VMMR0_INT_DECL(void) PGMR0DoneInitVM(PGVM pGVM);
VMMR0_INT_DECL(void) PGMR0CleanupVM(PGVM pGVM);
VMMR0_INT_DECL(int)  PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu);
VMMR0_INT_DECL(int)  PGMR0PhysFlushHandyPages(PGVM pGVM, VMCPUID idCpu);
VMMR0_INT_DECL(int)  PGMR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys);
VMMR0_INT_DECL(int)  PGMR0PhysMMIO2MapKernel(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
                                             size_t offSub, size_t cbSub, void **ppvMapping);
VMMR0_INT_DECL(int)  PGMR0PhysSetupIoMmu(PGVM pGVM);
VMMR0_INT_DECL(int)  PGMR0PhysHandlerInitReqHandler(PGVM pGVM, uint32_t cEntries);

VMMR0_INT_DECL(int)  PGMR0HandlerPhysicalTypeSetUpContext(PGVM pGVM, PGMPHYSHANDLERKIND enmKind, uint32_t fFlags,
                                                          PFNPGMPHYSHANDLER pfnHandler, PFNPGMRZPHYSPFHANDLER pfnPfHandler,
                                                          const char *pszDesc, PGMPHYSHANDLERTYPE hType);

VMMR0DECL(int)       PGMR0SharedModuleCheck(PVMCC pVM, PGVM pGVM, VMCPUID idCpu, PGMMSHAREDMODULE pModule,
                                            PCRTGCPTR64 paRegionsGCPtrs);
VMMR0DECL(int)       PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                                    PCPUMCTX pCtx, RTGCPHYS pvFault);
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTX pCtx, RTGCPHYS GCPhysFault, uint32_t uErr);
VMMR0_INT_DECL(int)  PGMR0PoolGrow(PGVM pGVM, VMCPUID idCpu);

# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                                             PCPUMCTX pCtx, RTGCPHYS GCPhysNestedFault,
                                                             bool fIsLinearAddrValid, RTGCPTR GCPtrNestedFault, PPGMPTWALK pWalk);
# endif
#endif /* IN_RING0 */

/**
 * Request buffer for PGMR0PhysAllocateRamRangeReq / VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE.
 */
typedef struct PGMPHYSALLOCATERAMRANGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Input: the GUEST_PAGE_SIZE value (for validation). */
    uint32_t        cbGuestPage;
    /** Input: Number of guest pages in the range. */
    uint32_t        cGuestPages;
    /** Input: The RAM range flags (PGM_RAM_RANGE_FLAGS_XXX). */
    uint32_t        fFlags;
    /** Output: The range identifier. */
    uint32_t        idNewRange;
} PGMPHYSALLOCATERAMRANGEREQ;
/** Pointer to a PGMR0PhysAllocateRamRangeReq / VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE request buffer. */
typedef PGMPHYSALLOCATERAMRANGEREQ *PPGMPHYSALLOCATERAMRANGEREQ;

VMMR0_INT_DECL(int) PGMR0PhysAllocateRamRangeReq(PGVM pGVM, PPGMPHYSALLOCATERAMRANGEREQ pReq);
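
/** @par Example
 * A sketch of how ring-3 might fill in this request before handing it to
 * ring-0; the VMMR3CallR0 call path and the input values are assumptions,
 * only the structure layout comes from the definition above:
 * @code
 * PGMPHYSALLOCATERAMRANGEREQ Req;
 * Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 * Req.Hdr.cbReq    = sizeof(Req);
 * Req.cbGuestPage  = GUEST_PAGE_SIZE;          // validated by ring-0
 * Req.cGuestPages  = cb >> GUEST_PAGE_SHIFT;   // assumed: cb is the range size
 * Req.fFlags       = 0;                        // PGM_RAM_RANGE_FLAGS_XXX
 * Req.idNewRange   = UINT32_MAX;               // output
 * // int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE, 0, &Req.Hdr);
 * @endcode
 */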


/**
 * Request buffer for PGMR0PhysMmio2RegisterReq / VMMR0_DO_PGM_PHYS_MMIO2_REGISTER.
 */
typedef struct PGMPHYSMMIO2REGISTERREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Input: the GUEST_PAGE_SIZE value (for validation). */
    uint32_t        cbGuestPage;
    /** Input: Number of guest pages in the MMIO2 range. */
    uint32_t        cGuestPages;
    /** Input: The MMIO2 ID of the first chunk. */
    uint8_t         idMmio2;
    /** Input: The number of MMIO2 chunks needed. */
    uint8_t         cChunks;
    /** Input: The sub-device number. */
    uint8_t         iSubDev;
    /** Input: The device region number. */
    uint8_t         iRegion;
    /** Input: Flags (PGMPHYS_MMIO2_FLAGS_XXX). */
    uint32_t        fFlags;
    /** Input: The owner device key. */
    PPDMDEVINSR3    pDevIns;
} PGMPHYSMMIO2REGISTERREQ;
/** Pointer to a PGMR0PhysMmio2RegisterReq / VMMR0_DO_PGM_PHYS_MMIO2_REGISTER request buffer. */
typedef PGMPHYSMMIO2REGISTERREQ *PPGMPHYSMMIO2REGISTERREQ;

VMMR0_INT_DECL(int) PGMR0PhysMmio2RegisterReq(PGVM pGVM, PPGMPHYSMMIO2REGISTERREQ pReq);
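
/** @par Example
 * The same request-header pattern as the RAM range request above; the chunk
 * bookkeeping values (idFirstChunk, cChunks) are assumptions made by the
 * illustration:
 * @code
 * PGMPHYSMMIO2REGISTERREQ Req;
 * Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 * Req.Hdr.cbReq    = sizeof(Req);
 * Req.cbGuestPage  = GUEST_PAGE_SIZE;
 * Req.cGuestPages  = cbRegion >> GUEST_PAGE_SHIFT;
 * Req.idMmio2      = idFirstChunk;             // first chunk ID (assumed)
 * Req.cChunks      = cChunks;                  // number of chunks (assumed)
 * Req.iSubDev      = 0;
 * Req.iRegion      = 0;
 * Req.fFlags       = PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES;
 * Req.pDevIns      = pDevIns;                  // owner key checked by ring-0
 * // int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_PHYS_MMIO2_REGISTER, 0, &Req.Hdr);
 * @endcode
 */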


/**
 * Request buffer for PGMR0PhysMmio2DeregisterReq / VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER.
 */
typedef struct PGMPHYSMMIO2DEREGISTERREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Input: The MMIO2 ID of the first chunk. */
    uint8_t         idMmio2;
    /** Input: The number of MMIO2 chunks to free. */
    uint8_t         cChunks;
    /** Input: Reserved and must be zero. */
    uint8_t         abReserved[6];
    /** Input: The owner device key. */
    PPDMDEVINSR3    pDevIns;
} PGMPHYSMMIO2DEREGISTERREQ;
/** Pointer to a PGMR0PhysMmio2DeregisterReq / VMMR0_DO_PGM_PHYS_MMIO2_DEREGISTER request buffer. */
typedef PGMPHYSMMIO2DEREGISTERREQ *PPGMPHYSMMIO2DEREGISTERREQ;

VMMR0_INT_DECL(int) PGMR0PhysMmio2DeregisterReq(PGVM pGVM, PPGMPHYSMMIO2DEREGISTERREQ pReq);

/**
 * Request buffer for PGMR0PhysRomAllocateRangeReq / VMMR0_DO_PGM_PHYS_ROM_ALLOCATE_RANGE.
 */
typedef struct PGMPHYSROMALLOCATERANGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Input: the GUEST_PAGE_SIZE value (for validation). */
    uint32_t        cbGuestPage;
    /** Input: Number of guest pages in the range. */
    uint32_t        cGuestPages;
    /** Input: The ROM range ID (index) to be allocated. */
    uint32_t        idRomRange;
    /** Input: The ROM range flags (PGMPHYS_ROM_FLAGS_XXX). */
    uint32_t        fFlags;
} PGMPHYSROMALLOCATERANGEREQ;
/** Pointer to a PGMR0PhysRomAllocateRangeReq / VMMR0_DO_PGM_PHYS_ROM_ALLOCATE_RANGE request buffer. */
typedef PGMPHYSROMALLOCATERANGEREQ *PPGMPHYSROMALLOCATERANGEREQ;

VMMR0_INT_DECL(int) PGMR0PhysRomAllocateRangeReq(PGVM pGVM, PPGMPHYSROMALLOCATERANGEREQ pReq);


/** @} */



/** @defgroup grp_pgm_r3 The PGM Host Context Ring-3 API
 * @{
 */
#ifdef IN_RING3
VMMR3_INT_DECL(void)    PGMR3EnableNemMode(PVM pVM);
VMMR3_INT_DECL(bool)    PGMR3IsNemModeEnabled(PVM pVM);
VMMR3DECL(int)          PGMR3Init(PVM pVM);
VMMR3DECL(int)          PGMR3InitFinalize(PVM pVM);
VMMR3_INT_DECL(int)     PGMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3DECL(void)         PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
VMMR3DECL(void)         PGMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(void)    PGMR3Reset(PVM pVM);
VMMR3_INT_DECL(void)    PGMR3ResetNoMorePhysWritesFlag(PVM pVM);
VMMR3_INT_DECL(void)    PGMR3MemSetup(PVM pVM, bool fReset);
VMMR3DECL(int)          PGMR3Term(PVM pVM);

VMMR3DECL(int)          PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc);
VMMR3DECL(int)          PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage);
VMMR3DECL(int)          PGMR3PhysWriteProtectRAM(PVM pVM);
VMMR3DECL(uint32_t)     PGMR3PhysGetRamRangeCount(PVM pVM);
VMMR3DECL(int)          PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
                                          const char **ppszDesc, bool *pfIsMmio);
VMMR3_INT_DECL(int)     PGMR3PhysGetRamBootZeroedRanges(PVM pVM, PPGMPHYSRANGES pRanges, uint32_t cMaxRanges);
VMMR3DECL(int)          PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem, uint64_t *pcbSharedMem, uint64_t *pcbZeroMem);
VMMR3DECL(int)          PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem, uint64_t *pcbBalloonedMem, uint64_t *pcbSharedMem);

VMMR3_INT_DECL(int)     PGMR3PhysMmioRegister(PVM pVM, PVMCPU pVCpu, RTGCPHYS cb, const char *pszDesc, uint16_t *pidRamRange);
VMMR3_INT_DECL(int)     PGMR3PhysMmioMap(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, uint16_t idRamRange,
                                         PGMPHYSHANDLERTYPE hType, uint64_t uUser);
VMMR3_INT_DECL(int)     PGMR3PhysMmioUnmap(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTGCPHYS cb, uint16_t idRamRange);
#endif /* IN_RING3 */

/** @name PGMPHYS_MMIO2_FLAGS_XXX - MMIO2 registration flags.
 * @see PGMR3PhysMmio2Register, PDMDevHlpMmio2Create
 * @{ */
/** Track dirty pages.
 * @see PGMR3PhysMmio2QueryAndResetDirtyBitmap(), PGMR3PhysMmio2ControlDirtyPageTracking(). */
#define PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES   RT_BIT_32(0)
/** Valid flags. */
#define PGMPHYS_MMIO2_FLAGS_VALID_MASK          UINT32_C(0x00000001)
/** @} */

#ifdef IN_RING3
VMMR3_INT_DECL(int)      PGMR3PhysMmio2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
                                                uint32_t fFlags, const char *pszDesc, void **ppv, PGMMMIO2HANDLE *phRegion);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2Deregister(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2Map(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2Unmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS GCPhys);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2Reduce(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS cbRegion);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2ValidateHandle(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2);
VMMR3_INT_DECL(RTGCPHYS) PGMR3PhysMmio2GetMappingAddress(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2ChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, uint32_t iNewRegion);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
                                                                void *pvBitmap, size_t cbBitmap);
VMMR3_INT_DECL(int)      PGMR3PhysMmio2ControlDirtyPageTracking(PVM pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, bool fEnabled);
#endif /* IN_RING3 */
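
/** @par Example
 * A hedged sketch of a device (say, a framebuffer) combining the dirty
 * tracking flag with the bitmap query; the region size and name are
 * illustrative:
 * @code
 * void          *pvMmio2 = NULL;
 * PGMMMIO2HANDLE hMmio2  = NIL_PGMMMIO2HANDLE;
 * int rc = PGMR3PhysMmio2Register(pVM, pDevIns, 0 /*iSubDev*/, 0 /*iRegion*/, _16M,
 *                                 PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES, "VRAM",
 *                                 &pvMmio2, &hMmio2);
 * ...
 * // One bit per guest page; sizing the buffer is the caller's responsibility:
 * uint64_t abBitmap[_16M / GUEST_PAGE_SIZE / 64];
 * rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2,
 *                                             abBitmap, sizeof(abBitmap));
 * @endcode
 */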

/** @name PGMPHYS_ROM_FLAGS_XXX - ROM registration flags.
 * @see PGMR3PhysRegisterRom, PDMDevHlpROMRegister
 * @{ */
/** Indicates that ROM shadowing should be enabled. */
#define PGMPHYS_ROM_FLAGS_SHADOWED                  UINT8_C(0x01)
/** Indicates that what pvBinary points to won't go away
 * and can be used for strictness checks. */
#define PGMPHYS_ROM_FLAGS_PERMANENT_BINARY          UINT8_C(0x02)
/** Indicates that the ROM is allowed to be missing from the saved state.
 * @note This is a hack for EFI, see @bugref{6940}. */
#define PGMPHYS_ROM_FLAGS_MAYBE_MISSING_FROM_STATE  UINT8_C(0x04)
/** Valid flags. */
#define PGMPHYS_ROM_FLAGS_VALID_MASK                UINT8_C(0x07)
/** @} */

#ifdef IN_RING3
VMMR3DECL(int)  PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
                                     const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc);
VMMR3DECL(int)  PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt);
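
/** @par Example
 * A hedged sketch of registering a shadowed ROM and later letting the guest
 * write to the RAM shadow (as a BIOS might when unlocking its shadow area);
 * the addresses, sizes and image variables are illustrative:
 * @code
 * int rc = PGMR3PhysRomRegister(pVM, pDevIns, UINT32_C(0xf0000) /*GCPhys*/, _64K,
 *                               pvRomImage, cbRomImage,
 *                               PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY,
 *                               "PC BIOS");
 * ...
 * rc = PGMR3PhysRomProtect(pVM, UINT32_C(0xf0000), _64K, PGMROMPROT_READ_ROM_WRITE_RAM);
 * @endcode
 */
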
# if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
VMMDECL(void)   PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable);
# endif

VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, uint32_t fFlags,
                                                     PFNPGMPHYSHANDLER pfnHandlerR3, const char *pszDesc,
                                                     PPGMPHYSHANDLERTYPE phType);

VMMR3_INT_DECL(int) PGMR3PoolGrow(PVM pVM, PVMCPU pVCpu);

VMMR3DECL(int)      PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv);
VMMR3DECL(uint8_t)  PGMR3PhysReadU8(PVM pVM, RTGCPHYS GCPhys, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(uint16_t) PGMR3PhysReadU16(PVM pVM, RTGCPHYS GCPhys, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(uint32_t) PGMR3PhysReadU32(PVM pVM, RTGCPHYS GCPhys, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(uint64_t) PGMR3PhysReadU64(PVM pVM, RTGCPHYS GCPhys, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(void)     PGMR3PhysWriteU8(PVM pVM, RTGCPHYS GCPhys, uint8_t Value, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(void)     PGMR3PhysWriteU16(PVM pVM, RTGCPHYS GCPhys, uint16_t Value, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(void)     PGMR3PhysWriteU32(PVM pVM, RTGCPHYS GCPhys, uint32_t Value, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(void)     PGMR3PhysWriteU64(PVM pVM, RTGCPHYS GCPhys, uint64_t Value, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(int)      PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(int)      PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin);
VMMR3DECL(int)      PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
VMMR3DECL(int)      PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock);
VMMR3DECL(int)      PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                                      void **papvPages, PPGMPAGEMAPLOCK paLocks);
VMMR3DECL(int)      PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
                                                              void const **papvPages, PPGMPAGEMAPLOCK paLocks);
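
/** @par Example
 * A hedged sketch of the external-access pattern: translate a guest physical
 * address to a host pointer, touch the memory, and drop the page lock again
 * (PGMPhysReleasePageMappingLock is declared earlier in this header):
 * @code
 * void          *pv = NULL;
 * PGMPAGEMAPLOCK Lock;
 * int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
 * if (RT_SUCCESS(rc))
 * {
 *     memcpy(pv, pvSrc, cbToCopy);    // must stay within the page
 *     PGMPhysReleasePageMappingLock(pVM, &Lock);
 * }
 * @endcode
 */
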
VMMR3DECL(int)      PGMR3PhysAllocateHandyPages(PVM pVM);

VMMR3DECL(int)      PGMR3CheckIntegrity(PVM pVM);

VMMR3DECL(int)      PGMR3DbgR3Ptr2GCPhys(PUVM pUVM, RTR3PTR R3Ptr, PRTGCPHYS pGCPhys);
VMMR3DECL(int)      PGMR3DbgR3Ptr2HCPhys(PUVM pUVM, RTR3PTR R3Ptr, PRTHCPHYS pHCPhys);
VMMR3DECL(int)      PGMR3DbgHCPhys2GCPhys(PUVM pUVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys);
VMMR3_INT_DECL(int) PGMR3DbgReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb, uint32_t fFlags, size_t *pcbRead);
VMMR3_INT_DECL(int) PGMR3DbgWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten);
VMMR3_INT_DECL(int) PGMR3DbgReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, uint32_t fFlags, size_t *pcbRead);
VMMR3_INT_DECL(int) PGMR3DbgWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, void const *pvSrc, size_t cb, uint32_t fFlags, size_t *pcbWritten);
VMMR3_INT_DECL(int) PGMR3DbgScanPhysical(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cbRange, RTGCPHYS GCPhysAlign, const uint8_t *pabNeedle, size_t cbNeedle, PRTGCPHYS pGCPhysHit);
VMMR3_INT_DECL(int) PGMR3DbgScanVirtual(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, RTGCPTR cbRange, RTGCPTR GCPtrAlign, const uint8_t *pabNeedle, size_t cbNeedle, PRTGCUINTPTR pGCPtrHit);
VMMR3_INT_DECL(int) PGMR3DumpHierarchyShw(PVM pVM, uint64_t cr3, uint32_t fFlags, uint64_t u64FirstAddr, uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp);
VMMR3_INT_DECL(int) PGMR3DumpHierarchyGst(PVM pVM, uint64_t cr3, uint32_t fFlags, RTGCPTR FirstAddr, RTGCPTR LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp);
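
/** @par Example
 * A hedged sketch of scanning the first 4 GiB of guest physical memory for a
 * two byte needle, as a debugger helper might do:
 * @code
 * static const uint8_t s_abNeedle[] = { 0x55, 0xaa };
 * RTGCPHYS GCPhysHit = NIL_RTGCPHYS;
 * int rc = PGMR3DbgScanPhysical(pVM, 0 /*GCPhys*/, _4G /*cbRange*/, 1 /*GCPhysAlign*/,
 *                               s_abNeedle, sizeof(s_abNeedle), &GCPhysHit);
 * if (RT_SUCCESS(rc))
 *     LogRel(("needle found at %RGp\n", GCPhysHit));
 * @endcode
 */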
#endif /* IN_RING3 */

/** @name Page sharing
 * @{ */
#ifdef IN_RING3
VMMR3DECL(int) PGMR3SharedModuleRegister(PVM pVM, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion,
                                         RTGCPTR GCBaseAddr, uint32_t cbModule,
                                         uint32_t cRegions, VMMDEVSHAREDREGIONDESC const *paRegions);
VMMR3DECL(int) PGMR3SharedModuleUnregister(PVM pVM, char *pszModuleName, char *pszVersion,
                                           RTGCPTR GCBaseAddr, uint32_t cbModule);
VMMR3DECL(int) PGMR3SharedModuleCheckAll(PVM pVM);
VMMR3DECL(int) PGMR3SharedModuleGetPageState(PVM pVM, RTGCPTR GCPtrPage, bool *pfShared, uint64_t *pfPageFlags);
#endif /* IN_RING3 */
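
/** @par Example
 * A hedged sketch of the page-fusion flow driven from the guest additions
 * interface: register a module's read-only region and then ask PGM to scan
 * for cross-VM duplicates.  The OS family, names and region values are
 * illustrative, and the VMMDEVSHAREDREGIONDESC field names are assumptions:
 * @code
 * VMMDEVSHAREDREGIONDESC aRegions[1];
 * aRegions[0].GCRegionAddr = GCPtrModule;      // assumed: start of the .text section
 * aRegions[0].cbRegion     = cbText;
 * int rc = PGMR3SharedModuleRegister(pVM, VBOXOSFAMILY_Windows64, "ntdll.dll", "10.0",
 *                                    GCPtrModule, cbModule, 1, aRegions);
 * ...
 * rc = PGMR3SharedModuleCheckAll(pVM);         // scan and fuse matching pages
 * @endcode
 */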
/** @} */

/** @} */

RT_C_DECLS_END

/** @} */
#endif /* !VBOX_INCLUDED_vmm_pgm_h */
